// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"

#define VREF_INITIAL_STEP	3
#define VREF_SECOND_STEP	1
#define VREF_MAX_INDEX		7
#define MAX_VALUE		(1024 - 1)
#define MIN_VALUE		(-MAX_VALUE)
#define GET_RD_SAMPLE_DELAY(data, cs)	(((data) >> rd_sample_mask[cs]) & 0x1f)
u32 ck_delay = (u32)-1, ck_delay_16 = (u32)-1;
u32 ca_delay;
int ddr3_tip_centr_skip_min_win_check = 0;
u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 last_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 current_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 last_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 lim_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 interface_state[MAX_INTERFACE_NUM];
u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 vref_window_size_th = 12;

static u8 pup_st[MAX_BUS_NUM][MAX_INTERFACE_NUM];

static u32 rd_sample_mask[] = {
    0,
    8,
    16,
    24
};
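
/*
 * Note on the helpers above: despite its name, rd_sample_mask[] holds the
 * per-chip-select bit offsets within READ_DATA_SAMPLE_DELAY, and
 * GET_RD_SAMPLE_DELAY() extracts the 5-bit read-sample delay of one chip
 * select from that register value. For example, for cs = 2 the offset is 16,
 * so the macro returns bits [20:16].
 */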

#define VREF_STEP_1	0
#define VREF_STEP_2	1
#define VREF_CONVERGE	2
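
/*
 * Per-pup Vref search states kept in pup_st[][]: VREF_STEP_1 steps the Vref
 * index up in coarse steps of VREF_INITIAL_STEP, VREF_STEP_2 searches back
 * down in fine steps of VREF_SECOND_STEP, and VREF_CONVERGE marks a pup whose
 * best Vref has been found (or whose window is already wide enough), at which
 * point it is skipped for the rest of ddr3_tip_vref().
 */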

/*
 * ODT additional timing
 */
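/*
 * ddr3_tip_write_additional_odt_setting() derives the ODT timing window from
 * the per-chip-select read-sample delays: it reads READ_DATA_SAMPLE_DELAY,
 * takes the minimum and maximum delay over all chip selects (padding the
 * maximum with the read-leveling phase), and writes the results into the
 * fields at bits [15:12] and [20:16] of ODT_TIMING_LOW. The meaning of those
 * two fields is inferred here from the writes below.
 */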
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
{
    u32 cs_num = 0, max_cs = 0, max_read_sample = 0, min_read_sample = 0x1f;
    u32 data_read[MAX_INTERFACE_NUM] = { 0 };
    u32 read_sample[MAX_CS_NUM];
    u32 val;
    u32 pup_index;
    int max_phase = MIN_VALUE, current_phase;
    enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
    struct hws_topology_map *tm = ddr3_get_topology_map();

    CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
                                   DUNIT_ODT_CONTROL_REG,
                                   0 << 8, 0x3 << 8));
    CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id,
                                  READ_DATA_SAMPLE_DELAY,
                                  data_read, MASK_ALL_BITS));
    val = data_read[if_id];

    max_cs = hws_ddr3_tip_max_cs_get();

    for (cs_num = 0; cs_num < max_cs; cs_num++) {
        read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num);

        /* find maximum of read_samples */
        if (read_sample[cs_num] >= max_read_sample) {
            if (read_sample[cs_num] == max_read_sample) {
                /* search for max phase */;
            } else {
                max_read_sample = read_sample[cs_num];
                max_phase = MIN_VALUE;
            }

            for (pup_index = 0;
                 pup_index < tm->num_of_bus_per_interface;
                 pup_index++) {
                CHECK_STATUS(ddr3_tip_bus_read
                             (dev_num, if_id,
                              ACCESS_TYPE_UNICAST, pup_index,
                              DDR_PHY_DATA,
                              RL_PHY_REG + CS_REG_VALUE(cs_num),
                              &val));

                current_phase = ((int)val & 0xe0) >> 6;
                if (current_phase >= max_phase)
                    max_phase = current_phase;
            }
        }

        /* find minimum */
        if (read_sample[cs_num] < min_read_sample)
            min_read_sample = read_sample[cs_num];
    }

    if (min_read_sample <= tm->interface_params[if_id].cas_l)
        min_read_sample = (int)tm->interface_params[if_id].cas_l;

    min_read_sample = min_read_sample - 1;
    max_read_sample = max_read_sample + 4 + (max_phase + 1) / 2 + 1;
    if (max_read_sample >= 0x1f)
        max_read_sample = 0x1f;

    CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
                                   ODT_TIMING_LOW,
                                   ((min_read_sample - 1) << 12),
                                   0xf << 12));
    CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
                                   ODT_TIMING_LOW,
                                   (max_read_sample << 16),
                                   0x1f << 16));

    return MV_OK;
}
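
/*
 * get_valid_win_rx() collects the per-pup RX valid-window size, as reported
 * in the RESULT_DB_PHY_REG result register of each data PHY, into res[0..3]
 * (5 bits per pup, taken at RESULT_DB_PHY_REG_RX_OFFSET).
 */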
int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
{
    u32 reg_pup = RESULT_DB_PHY_REG_ADDR;
    u32 reg_data;
    u32 cs_num;
    int i;

    cs_num = 0;

    /* TBD */
    reg_pup += cs_num;

    for (i = 0; i < 4; i++) {
        CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
                                       ACCESS_TYPE_UNICAST, i,
                                       DDR_PHY_DATA, reg_pup,
                                       &reg_data));
        res[i] = (reg_data >> RESULT_DB_PHY_REG_RX_OFFSET) & 0x1f;
    }

    return 0;
}

/*
 * This algorithm searches for the vertical optimum, i.e. the voltage (Vref)
 * at which the data is sampled. Moving the voltage sample point can improve
 * the eye / window size of the bit and the pup. The setting is tuned the same
 * for all DQs, so there is no PBS-like per-bit code; it is more like
 * centralization. Because there is no training state-machine support for
 * this, a slightly smarter search is done in software to save time.
 */
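/*
 * Roughly, the search in ddr3_tip_vref() works as follows: for every active
 * pup whose RX window is not already above vref_window_size_th, step the Vref
 * index up by VREF_INITIAL_STEP while the measured window (averaged over
 * several ddr3_tip_centralization_rx() runs) keeps growing; once it stops
 * growing, switch to VREF_STEP_2 and walk back down in VREF_SECOND_STEP steps
 * until the window degrades again or the already-covered limit (lim_vref) is
 * reached, then program the best Vref found and mark the pup as converged.
 */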
int ddr3_tip_vref(u32 dev_num)
{
    /*
     * The Vref register has a non-linear order. Need to check what it will
     * be in future projects.
     */
    u32 vref_map[8] = {
        1, 2, 3, 4, 5, 6, 7, 0
    };
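    /*
     * vref_map[] translates the logical search index (0..VREF_MAX_INDEX)
     * into the value written to the low nibble of PHY data register 0xa8
     * (reg_addr below); note that the highest logical index maps to register
     * code 0 because of the non-linear register ordering.
     */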
    /* State and parameter definitions */
    u32 initial_step = VREF_INITIAL_STEP;
    /* need to be assigned with minus ????? */
    u32 second_step = VREF_SECOND_STEP;
    u32 algo_run_flag = 0, currrent_vref = 0;
    u32 while_count = 0;
    u32 pup = 0, if_id = 0, num_pup = 0, rep = 0;
    u32 val = 0;
    u32 reg_addr = 0xa8;
    u32 copy_start_pattern, copy_end_pattern;
    enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
    u8 res[4];
    struct hws_topology_map *tm = ddr3_get_topology_map();

    CHECK_STATUS(ddr3_tip_special_rx(dev_num));

    /* save start/end pattern */
    copy_start_pattern = start_pattern;
    copy_end_pattern = end_pattern;

    /* set vref as centralization pattern */
    start_pattern = PATTERN_VREF;
    end_pattern = PATTERN_VREF;
    /* init params */
    for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
        VALIDATE_ACTIVE(tm->if_act_mask, if_id);
        for (pup = 0;
             pup < tm->num_of_bus_per_interface; pup++) {
            current_vref[pup][if_id] = 0;
            last_vref[pup][if_id] = 0;
            lim_vref[pup][if_id] = 0;
            current_valid_window[pup][if_id] = 0;
            last_valid_window[pup][if_id] = 0;
            if (vref_window_size[if_id][pup] >
                vref_window_size_th) {
                pup_st[pup][if_id] = VREF_CONVERGE;
                DEBUG_TRAINING_HW_ALG(
                    DEBUG_LEVEL_INFO,
                    ("VREF config, IF[ %d ]pup[ %d ] - Vref tune not required (%d)\n",
                     if_id, pup, __LINE__));
            } else {
                pup_st[pup][if_id] = VREF_STEP_1;
                CHECK_STATUS(ddr3_tip_bus_read
                             (dev_num, if_id,
                              ACCESS_TYPE_UNICAST, pup,
                              DDR_PHY_DATA, reg_addr, &val));
                CHECK_STATUS(ddr3_tip_bus_write
                             (dev_num, ACCESS_TYPE_UNICAST,
                              if_id, ACCESS_TYPE_UNICAST,
                              pup, DDR_PHY_DATA, reg_addr,
                              (val & (~0xf)) | vref_map[0]));
                DEBUG_TRAINING_HW_ALG(
                    DEBUG_LEVEL_INFO,
                    ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
                     if_id, pup,
                     (val & (~0xf)) | vref_map[0],
                     __LINE__));
            }
        }
        interface_state[if_id] = 0;
    }

    /* TODO: Set number of active interfaces */
    num_pup = tm->num_of_bus_per_interface * MAX_INTERFACE_NUM;
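
    /*
     * Main loop: algo_run_flag counts pups that reached VREF_CONVERGE and the
     * loop exits once that count exceeds num_pup or after at most 10
     * iterations (the cap is what usually terminates it, since num_pup counts
     * all interfaces, active or not). Each iteration reruns RX centralization
     * three times (rep = 1..3) and keeps a running average of the measured
     * window, scaled by 1000, in current_valid_window[][].
     */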
    while ((algo_run_flag <= num_pup) && (while_count < 10)) {
        while_count++;
        for (rep = 1; rep < 4; rep++) {
            ddr3_tip_centr_skip_min_win_check = 1;
            ddr3_tip_centralization_rx(dev_num);
            ddr3_tip_centr_skip_min_win_check = 0;

            /* Read valid window results only for non-converged pups */
            for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
                VALIDATE_ACTIVE(tm->if_act_mask, if_id);
                if (interface_state[if_id] != 4) {
                    get_valid_win_rx(dev_num, if_id, res);
                    for (pup = 0;
                         pup < tm->num_of_bus_per_interface;
                         pup++) {
                        VALIDATE_ACTIVE(tm->bus_act_mask, pup);
                        if (pup_st[pup][if_id] == VREF_CONVERGE)
                            continue;

                        current_valid_window[pup][if_id] =
                            (current_valid_window[pup][if_id] *
                             (rep - 1) + 1000 * res[pup]) / rep;
                    }
                }
            }
        }

        for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
            VALIDATE_ACTIVE(tm->if_act_mask, if_id);
            DEBUG_TRAINING_HW_ALG(
                DEBUG_LEVEL_TRACE,
                ("current_valid_window: IF[ %d ] - ", if_id));
            for (pup = 0;
                 pup < tm->num_of_bus_per_interface; pup++) {
                VALIDATE_ACTIVE(tm->bus_act_mask, pup);
                DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
                                      ("%d ",
                                       current_valid_window[pup][if_id]));
            }
            DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, ("\n"));
        }

        /* Compare results and respond as a function of state */
        for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
            VALIDATE_ACTIVE(tm->if_act_mask, if_id);
            for (pup = 0;
                 pup < tm->num_of_bus_per_interface; pup++) {
                VALIDATE_ACTIVE(tm->bus_act_mask, pup);
                DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
                                      ("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n",
                                       if_id, pup,
                                       pup_st[pup][if_id], __LINE__));
                if (pup_st[pup][if_id] == VREF_CONVERGE)
                    continue;

                DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
                                      ("I/F[ %d ], pup[ %d ] CHECK progress - Current %d Last %d, limit VREF %d (%d)\n",
                                       if_id, pup,
                                       current_valid_window[pup][if_id],
                                       last_valid_window[pup][if_id],
                                       lim_vref[pup][if_id], __LINE__));
                /*
                 * The -1 is for the solution resolution of +/- 1 ADLL tap
                 */
                if (current_valid_window[pup][if_id] + 200 >=
                    (last_valid_window[pup][if_id])) {
                    if (pup_st[pup][if_id] == VREF_STEP_1) {
                        /*
                         * We stay in the same state and step; just update
                         * the window size (take the max) and the Vref.
                         */
                        if (current_vref[pup][if_id] == VREF_MAX_INDEX) {
                            /*
                             * If we stepped to the end and didn't converge
                             * to some particular better Vref value, define
                             * the pup as converged and step back to the
                             * nominal Vref.
                             */
                            pup_st[pup][if_id] = VREF_CONVERGE;
                            algo_run_flag++;
                            interface_state[if_id]++;
                            DEBUG_TRAINING_HW_ALG(
                                DEBUG_LEVEL_TRACE,
                                ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
                                 if_id, pup,
                                 current_vref[pup][if_id], __LINE__));
                        } else {
                            /* continue to update the Vref index */
                            current_vref[pup][if_id] =
                                ((current_vref[pup][if_id] + initial_step) >
                                 VREF_MAX_INDEX) ?
                                VREF_MAX_INDEX :
                                (current_vref[pup][if_id] + initial_step);
                            if (current_vref[pup][if_id] == VREF_MAX_INDEX)
                                pup_st[pup][if_id] = VREF_STEP_2;
                            lim_vref[pup][if_id] =
                                last_vref[pup][if_id] =
                                current_vref[pup][if_id];
                        }

                        last_valid_window[pup][if_id] =
                            GET_MAX(current_valid_window[pup][if_id],
                                    last_valid_window[pup][if_id]);

                        /* update the Vref for the next stage */
                        currrent_vref = current_vref[pup][if_id];
                        CHECK_STATUS(ddr3_tip_bus_read
                                     (dev_num, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr, &val));
                        CHECK_STATUS(ddr3_tip_bus_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr,
                                      (val & (~0xf)) |
                                      vref_map[currrent_vref]));
                        DEBUG_TRAINING_HW_ALG(
                            DEBUG_LEVEL_TRACE,
                            ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
                             if_id, pup,
                             (val & (~0xf)) | vref_map[currrent_vref],
                             __LINE__));
                    } else if (pup_st[pup][if_id] == VREF_STEP_2) {
                        /*
                         * We keep on searching back with the same step size.
                         */
                        last_valid_window[pup][if_id] =
                            GET_MAX(current_valid_window[pup][if_id],
                                    last_valid_window[pup][if_id]);
                        last_vref[pup][if_id] = current_vref[pup][if_id];

                        /* we finished the whole search space */
                        if ((current_vref[pup][if_id] - second_step) ==
                            lim_vref[pup][if_id]) {
                            /*
                             * If we stepped to the end and didn't converge
                             * to some particular better Vref value, define
                             * the pup as converged and step back to the
                             * nominal Vref.
                             */
                            pup_st[pup][if_id] = VREF_CONVERGE;
                            algo_run_flag++;
                            interface_state[if_id]++;
                            current_vref[pup][if_id] =
                                (current_vref[pup][if_id] - second_step);
                            DEBUG_TRAINING_HW_ALG(
                                DEBUG_LEVEL_TRACE,
                                ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
                                 if_id, pup,
                                 current_vref[pup][if_id], __LINE__));
                        } else if (current_vref[pup][if_id] ==
                                   lim_vref[pup][if_id]) {
                            /*
                             * We finished the whole search space: since we
                             * stepped to the end and didn't converge to some
                             * particular better Vref value, define the pup
                             * as converged and step back to the nominal
                             * Vref.
                             */
                            pup_st[pup][if_id] = VREF_CONVERGE;
                            algo_run_flag++;
                            interface_state[if_id]++;
                            DEBUG_TRAINING_HW_ALG(
                                DEBUG_LEVEL_TRACE,
                                ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
                                 if_id, pup,
                                 current_vref[pup][if_id], __LINE__));
                        } else {
                            current_vref[pup][if_id] =
                                current_vref[pup][if_id] - second_step;
                        }

                        /* Update the Vref for the next stage */
                        currrent_vref = current_vref[pup][if_id];
                        CHECK_STATUS(ddr3_tip_bus_read
                                     (dev_num, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr, &val));
                        CHECK_STATUS(ddr3_tip_bus_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr,
                                      (val & (~0xf)) |
                                      vref_map[currrent_vref]));
                        DEBUG_TRAINING_HW_ALG(
                            DEBUG_LEVEL_TRACE,
                            ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
                             if_id, pup,
                             (val & (~0xf)) | vref_map[currrent_vref],
                             __LINE__));
                    }
                } else {
                    /* we change state and change step */
                    if (pup_st[pup][if_id] == VREF_STEP_1) {
                        pup_st[pup][if_id] = VREF_STEP_2;
                        lim_vref[pup][if_id] =
                            current_vref[pup][if_id] - initial_step;
                        last_valid_window[pup][if_id] =
                            current_valid_window[pup][if_id];
                        last_vref[pup][if_id] = current_vref[pup][if_id];
                        current_vref[pup][if_id] =
                            last_vref[pup][if_id] - second_step;

                        /* Update the Vref for the next stage */
                        CHECK_STATUS(ddr3_tip_bus_read
                                     (dev_num, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr, &val));
                        CHECK_STATUS(ddr3_tip_bus_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr,
                                      (val & (~0xf)) |
                                      vref_map[current_vref[pup][if_id]]));
                        DEBUG_TRAINING_HW_ALG(
                            DEBUG_LEVEL_TRACE,
                            ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
                             if_id, pup,
                             (val & (~0xf)) |
                             vref_map[current_vref[pup][if_id]],
                             __LINE__));
                    } else if (pup_st[pup][if_id] == VREF_STEP_2) {
                        /*
                         * The last search point was the maximum; set that
                         * value and exit.
                         */
                        CHECK_STATUS(ddr3_tip_bus_read
                                     (dev_num, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr, &val));
                        CHECK_STATUS(ddr3_tip_bus_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      ACCESS_TYPE_UNICAST, pup,
                                      DDR_PHY_DATA, reg_addr,
                                      (val & (~0xf)) |
                                      vref_map[last_vref[pup][if_id]]));
                        DEBUG_TRAINING_HW_ALG(
                            DEBUG_LEVEL_TRACE,
                            ("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
                             if_id, pup,
                             (val & (~0xf)) |
                             vref_map[last_vref[pup][if_id]],
                             __LINE__));
                        pup_st[pup][if_id] = VREF_CONVERGE;
                        algo_run_flag++;
                        interface_state[if_id]++;
                        DEBUG_TRAINING_HW_ALG(
                            DEBUG_LEVEL_TRACE,
                            ("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
                             if_id, pup,
                             current_vref[pup][if_id], __LINE__));
                    }
                }
            }
        }
    }

    for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
        VALIDATE_ACTIVE(tm->if_act_mask, if_id);
        for (pup = 0;
             pup < tm->num_of_bus_per_interface; pup++) {
            VALIDATE_ACTIVE(tm->bus_act_mask, pup);
            CHECK_STATUS(ddr3_tip_bus_read
                         (dev_num, if_id,
                          ACCESS_TYPE_UNICAST, pup,
                          DDR_PHY_DATA, reg_addr, &val));
            DEBUG_TRAINING_HW_ALG(
                DEBUG_LEVEL_INFO,
                ("FINAL values: I/F[ %d ], pup[ %d ] - Vref = %X (%d)\n",
                 if_id, pup, val, __LINE__));
        }
        /* mark this (active) interface as passed */
        flow_result[if_id] = TEST_SUCCESS;
    }

    /* restore start/end pattern */
    start_pattern = copy_start_pattern;
    end_pattern = copy_end_pattern;

    return 0;
}

/*
 * CK/CA Delay
 */
int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
{
    u32 if_id = 0;
    u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
    struct hws_topology_map *tm = ddr3_get_topology_map();

    /*
     * ck_delay_table delays the clock signal only
     * (to overcome timing issues between CK and the command/address
     * signals).
     */
    /*
     * ca_delay delays the entire command & address signals
     * (including the clock signal, to overcome a DGL error on the clock
     * versus the DQS).
     */
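    /*
     * Both delays are converted from picoseconds to ADLL taps by dividing by
     * adll_tap and packed into one PHY control word: the CK tap count in
     * bits [5:0] and the CA tap count in bits [15:10]. As a purely
     * illustrative example (actual values depend on the board and on the
     * ck_delay/ca_delay settings): with ck_delay = 160 ps and
     * adll_tap = 40 ps, ck_num_adll_tap would be 4.
     */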

    /* Calc ADLL Tap */
    if ((ck_delay == -1) || (ck_delay_16 == -1)) {
        DEBUG_TRAINING_HW_ALG(
            DEBUG_LEVEL_ERROR,
            ("ERROR: One of the ck_delay values is not initialized!!!\n"));
    }

    for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
        VALIDATE_ACTIVE(tm->if_act_mask, if_id);

        /* Calc delay ps in ADLL tap */
        if (tm->interface_params[if_id].bus_width == BUS_WIDTH_16)
            ck_num_adll_tap = ck_delay_16 / adll_tap;
        else
            ck_num_adll_tap = ck_delay / adll_tap;

        ca_num_adll_tap = ca_delay / adll_tap;
        data = (ck_num_adll_tap & 0x3f) +
            ((ca_num_adll_tap & 0x3f) << 10);

        /*
         * Set the ADLL number to the CK ADLL for the interfaces,
         * for all pups
         */
        DEBUG_TRAINING_HW_ALG(
            DEBUG_LEVEL_TRACE,
            ("ck_num_adll_tap %d ca_num_adll_tap %d adll_tap %d\n",
             ck_num_adll_tap, ca_num_adll_tap, adll_tap));

        CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST,
                                        if_id, ACCESS_TYPE_MULTICAST,
                                        PARAM_NOT_CARE, DDR_PHY_CONTROL,
                                        0x0, data));
    }

    return MV_OK;
}