ddr3_training_hw_algo.c

/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

#define VREF_INITIAL_STEP	3
#define VREF_SECOND_STEP	1
#define VREF_MAX_INDEX		7
#define MAX_VALUE		(1024 - 1)
#define MIN_VALUE		(-MAX_VALUE)
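
/*
 * READ_DATA_SAMPLE_DELAY packs a 5-bit read-sample delay per chip select at
 * the bit offsets listed in rd_sample_mask[] below; this macro extracts the
 * field for a given CS.
 */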
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0x1f)

u32 ck_delay = (u32)-1, ck_delay_16 = (u32)-1;
u32 ca_delay;
int ddr3_tip_centr_skip_min_win_check = 0;
u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 last_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 current_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 last_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 lim_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 interface_state[MAX_INTERFACE_NUM];
u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 vref_window_size_th = 12;

static u8 pup_st[MAX_BUS_NUM][MAX_INTERFACE_NUM];

static u32 rd_sample_mask[] = {
	0,
	8,
	16,
	24
};
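
/*
 * Per-pup Vref search states: VREF_STEP_1 searches upward in coarse
 * VREF_INITIAL_STEP increments, VREF_STEP_2 searches back down in fine
 * VREF_SECOND_STEP decrements, and VREF_CONVERGE marks the pup as done.
 */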
#define VREF_STEP_1		0
#define VREF_STEP_2		1
#define VREF_CONVERGE		2

/*
 * ODT additional timing
 */
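/*
 * Derive the ODT switching window from the per-CS read sample delays and the
 * read-leveling phase, and program it into the ODT_TIMING_LOW register.
 */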
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
{
	u32 cs_num = 0, max_cs = 0, max_read_sample = 0, min_read_sample = 0x1f;
	u32 data_read[MAX_INTERFACE_NUM] = { 0 };
	u32 read_sample[MAX_CS_NUM];
	u32 val;
	u32 pup_index;
	int max_phase = MIN_VALUE, current_phase;
	enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       DUNIT_ODT_CONTROL_REG,
				       0 << 8, 0x3 << 8));
	CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id,
				      READ_DATA_SAMPLE_DELAY,
				      data_read, MASK_ALL_BITS));
	val = data_read[if_id];

	max_cs = hws_ddr3_tip_max_cs_get();

	for (cs_num = 0; cs_num < max_cs; cs_num++) {
		read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num);

		/* find maximum of read_samples */
		if (read_sample[cs_num] >= max_read_sample) {
			if (read_sample[cs_num] == max_read_sample) {
				/* same delay - keep searching for max phase */
			} else {
				max_read_sample = read_sample[cs_num];
				max_phase = MIN_VALUE;
			}

			for (pup_index = 0;
			     pup_index < tm->num_of_bus_per_interface;
			     pup_index++) {
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, pup_index,
					      DDR_PHY_DATA,
					      RL_PHY_REG + CS_REG_VALUE(cs_num),
					      &val));
				current_phase = ((int)val & 0xe0) >> 6;
				if (current_phase >= max_phase)
					max_phase = current_phase;
			}
		}

		/* find minimum */
		if (read_sample[cs_num] < min_read_sample)
			min_read_sample = read_sample[cs_num];
	}
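
	/*
	 * Extend the ODT window: start a couple of cycles before the earliest
	 * read sample and end several cycles after the latest one (accounting
	 * for the read-leveling phase), clamped to the ODT_TIMING_LOW field
	 * widths.
	 */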
	if (min_read_sample <= tm->interface_params[if_id].cas_l) {
		min_read_sample = (int)tm->interface_params[if_id].cas_l;
	}

	min_read_sample = min_read_sample - 1;
	max_read_sample = max_read_sample + 4 + (max_phase + 1) / 2 + 1;
	if (max_read_sample >= 0x1f)
		max_read_sample = 0x1f;

	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       ODT_TIMING_LOW,
				       ((min_read_sample - 1) << 12),
				       0xf << 12));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       ODT_TIMING_LOW,
				       (max_read_sample << 16),
				       0x1f << 16));

	return MV_OK;
}
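
/*
 * Read the RX valid-window result (in ADLL taps) from the PHY result
 * register for each of the four pups on this interface.
 */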
int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
{
	u32 reg_pup = RESULT_DB_PHY_REG_ADDR;
	u32 reg_data;
	u32 cs_num;
	int i;

	cs_num = 0;

	/* TBD */
	reg_pup += cs_num;

	for (i = 0; i < 4; i++) {
		CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
					       ACCESS_TYPE_UNICAST, i,
					       DDR_PHY_DATA, reg_pup,
					       &reg_data));
		res[i] = (reg_data >> RESULT_DB_PHY_REG_RX_OFFSET) & 0x1f;
	}

	return 0;
}

/*
 * This algorithm searches for the vertical optimum, i.e. the best Vref
 * sampling voltage. Tuning the voltage sampling point can improve the eye /
 * window size of the bits and the pup.
 * Vref is tuned per pup (the same value for all of its DQs), so there is no
 * PBS-like per-bit handling; it is closer to centralization.
 * Since there is no training state-machine support for this stage, a
 * slightly smarter search is used to save time.
 */
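/*
 * Search flow per pup (a summary of the code below):
 * 1. Skip pups whose RX window already exceeds vref_window_size_th.
 * 2. Start from vref_map[0]; run RX centralization three times and average
 *    the resulting window size (scaled by 1000).
 * 3. VREF_STEP_1: while the window does not shrink, step Vref up by
 *    VREF_INITIAL_STEP until VREF_MAX_INDEX is reached.
 * 4. VREF_STEP_2: step back down by VREF_SECOND_STEP until the window
 *    shrinks again or the step-1 limit is reached.
 * 5. VREF_CONVERGE: program the best Vref found and stop.
 */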
int ddr3_tip_vref(u32 dev_num)
{
	/*
	 * The Vref register values are not in linear order; this may need to
	 * be revisited for future projects.
	 */
	u32 vref_map[8] = {
		1, 2, 3, 4, 5, 6, 7, 0
	};
	/* State and parameter definitions */
	u32 initial_step = VREF_INITIAL_STEP;
	/* TBD: should this be assigned a negative value? */
	u32 second_step = VREF_SECOND_STEP;
	u32 algo_run_flag = 0, currrent_vref = 0;
	u32 while_count = 0;
	u32 pup = 0, if_id = 0, num_pup = 0, rep = 0;
	u32 val = 0;
	u32 reg_addr = 0xa8;
	u32 copy_start_pattern, copy_end_pattern;
	enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
	u8 res[4];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	CHECK_STATUS(ddr3_tip_special_rx(dev_num));

	/* save start/end pattern */
	copy_start_pattern = start_pattern;
	copy_end_pattern = end_pattern;

	/* set vref as centralization pattern */
	start_pattern = PATTERN_VREF;
	end_pattern = PATTERN_VREF;

	/* init params */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (pup = 0;
		     pup < tm->num_of_bus_per_interface; pup++) {
			current_vref[pup][if_id] = 0;
			last_vref[pup][if_id] = 0;
			lim_vref[pup][if_id] = 0;
			current_valid_window[pup][if_id] = 0;
			last_valid_window[pup][if_id] = 0;
			if (vref_window_size[if_id][pup] >
			    vref_window_size_th) {
				pup_st[pup][if_id] = VREF_CONVERGE;
				DEBUG_TRAINING_HW_ALG(
					DEBUG_LEVEL_INFO,
					("VREF config, IF[ %d ]pup[ %d ] - Vref tune not required (%d)\n",
					 if_id, pup, __LINE__));
			} else {
				pup_st[pup][if_id] = VREF_STEP_1;
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, pup,
					      DDR_PHY_DATA, reg_addr, &val));
				CHECK_STATUS(ddr3_tip_bus_write
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id, ACCESS_TYPE_UNICAST,
					      pup, DDR_PHY_DATA, reg_addr,
					      (val & (~0xf)) | vref_map[0]));
				DEBUG_TRAINING_HW_ALG(
					DEBUG_LEVEL_INFO,
					("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
					 if_id, pup,
					 (val & (~0xf)) | vref_map[0],
					 __LINE__));
			}
		}
		interface_state[if_id] = 0;
	}

	/* TODO: Set number of active interfaces */
	num_pup = tm->num_of_bus_per_interface * MAX_INTERFACE_NUM;
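
	/*
	 * Main convergence loop: algo_run_flag counts pups that reached
	 * VREF_CONVERGE; the loop runs for at most 10 iterations.
	 */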
	while ((algo_run_flag <= num_pup) && (while_count < 10)) {
		while_count++;
		for (rep = 1; rep < 4; rep++) {
			ddr3_tip_centr_skip_min_win_check = 1;
			ddr3_tip_centralization_rx(dev_num);
			ddr3_tip_centr_skip_min_win_check = 0;

			/* Read valid-window results only for non-converged pups */
			for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				if (interface_state[if_id] != 4) {
					get_valid_win_rx(dev_num, if_id, res);
					for (pup = 0;
					     pup < tm->num_of_bus_per_interface;
					     pup++) {
						VALIDATE_ACTIVE
							(tm->bus_act_mask, pup);
						if (pup_st[pup][if_id] ==
						    VREF_CONVERGE)
							continue;
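
						/*
						 * Running average of the
						 * window size over the three
						 * reps, scaled by 1000.
						 */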
						current_valid_window[pup][if_id] =
							(current_valid_window[pup][if_id] *
							 (rep - 1) +
							 1000 * res[pup]) / rep;
					}
				}
			}
		}

		for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			DEBUG_TRAINING_HW_ALG(
				DEBUG_LEVEL_TRACE,
				("current_valid_window: IF[ %d ] - ", if_id));
			for (pup = 0;
			     pup < tm->num_of_bus_per_interface; pup++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, pup);
				DEBUG_TRAINING_HW_ALG(
					DEBUG_LEVEL_TRACE,
					("%d ",
					 current_valid_window[pup][if_id]));
			}
			DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, ("\n"));
		}

		/* Compare results and respond as function of state */
		for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			for (pup = 0;
			     pup < tm->num_of_bus_per_interface; pup++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, pup);
				DEBUG_TRAINING_HW_ALG(
					DEBUG_LEVEL_TRACE,
					("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n",
					 if_id, pup,
					 pup_st[pup][if_id], __LINE__));
				if (pup_st[pup][if_id] == VREF_CONVERGE)
					continue;

				DEBUG_TRAINING_HW_ALG(
					DEBUG_LEVEL_TRACE,
					("I/F[ %d ], pup[ %d ] CHECK progress - Current %d Last %d, limit VREF %d (%d)\n",
					 if_id, pup,
					 current_valid_window[pup][if_id],
					 last_valid_window[pup][if_id],
					 lim_vref[pup][if_id], __LINE__));
				/*
				 * Allow some margin for solution resolution
				 * (+/- 1 tap of the ADLL).
				 */
				if (current_valid_window[pup][if_id] + 200 >=
				    last_valid_window[pup][if_id]) {
					if (pup_st[pup][if_id] == VREF_STEP_1) {
						/*
						 * We stay in the same state and
						 * step; just update the window
						 * size (take the max) and the
						 * Vref.
						 */
						if (current_vref[pup][if_id] ==
						    VREF_MAX_INDEX) {
							/*
							 * We stepped to the end
							 * without converging to
							 * a particular better
							 * Vref value; mark the
							 * pup as converged and
							 * step back to the
							 * nominal Vref.
							 */
							pup_st[pup][if_id] =
								VREF_CONVERGE;
							algo_run_flag++;
							interface_state[if_id]++;
							DEBUG_TRAINING_HW_ALG(
								DEBUG_LEVEL_TRACE,
								("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
								 if_id, pup,
								 current_vref[pup][if_id],
								 __LINE__));
						} else {
							/* continue to update the Vref index */
							current_vref[pup][if_id] =
								((current_vref[pup][if_id] +
								  initial_step) >
								 VREF_MAX_INDEX) ?
								VREF_MAX_INDEX :
								(current_vref[pup][if_id] +
								 initial_step);
							if (current_vref[pup][if_id] ==
							    VREF_MAX_INDEX)
								pup_st[pup][if_id] =
									VREF_STEP_2;
							lim_vref[pup][if_id] =
								last_vref[pup][if_id] =
								current_vref[pup][if_id];
						}
						last_valid_window[pup][if_id] =
							GET_MAX(current_valid_window[pup][if_id],
								last_valid_window[pup][if_id]);
						/* update the Vref for the next stage */
						currrent_vref =
							current_vref[pup][if_id];
						CHECK_STATUS(ddr3_tip_bus_read
							     (dev_num, if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      &val));
						CHECK_STATUS(ddr3_tip_bus_write
							     (dev_num,
							      ACCESS_TYPE_UNICAST,
							      if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      (val & (~0xf)) |
							      vref_map[currrent_vref]));
						DEBUG_TRAINING_HW_ALG(
							DEBUG_LEVEL_TRACE,
							("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
							 if_id, pup,
							 (val & (~0xf)) |
							 vref_map[currrent_vref],
							 __LINE__));
					} else if (pup_st[pup][if_id] ==
						   VREF_STEP_2) {
						/*
						 * Keep searching back with the
						 * same step size.
						 */
						last_valid_window[pup][if_id] =
							GET_MAX(current_valid_window[pup][if_id],
								last_valid_window[pup][if_id]);
						last_vref[pup][if_id] =
							current_vref[pup][if_id];
						/* we covered the whole search space */
						if ((current_vref[pup][if_id] -
						     second_step) ==
						    lim_vref[pup][if_id]) {
							/*
							 * We stepped to the end
							 * without converging to
							 * a particular better
							 * Vref value; mark the
							 * pup as converged and
							 * step back to the
							 * nominal Vref.
							 */
							pup_st[pup][if_id] =
								VREF_CONVERGE;
							algo_run_flag++;
							interface_state[if_id]++;
							current_vref[pup][if_id] =
								current_vref[pup][if_id] -
								second_step;
							DEBUG_TRAINING_HW_ALG(
								DEBUG_LEVEL_TRACE,
								("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
								 if_id, pup,
								 current_vref[pup][if_id],
								 __LINE__));
						} else if (current_vref[pup][if_id] ==
							   lim_vref[pup][if_id]) {
							/* we covered the whole search space */
							pup_st[pup][if_id] =
								VREF_CONVERGE;
							algo_run_flag++;
							interface_state[if_id]++;
							DEBUG_TRAINING_HW_ALG(
								DEBUG_LEVEL_TRACE,
								("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
								 if_id, pup,
								 current_vref[pup][if_id],
								 __LINE__));
						} else {
							current_vref[pup][if_id] =
								current_vref[pup][if_id] -
								second_step;
						}
						/* Update the Vref for the next stage */
						currrent_vref =
							current_vref[pup][if_id];
						CHECK_STATUS(ddr3_tip_bus_read
							     (dev_num, if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      &val));
						CHECK_STATUS(ddr3_tip_bus_write
							     (dev_num,
							      ACCESS_TYPE_UNICAST,
							      if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      (val & (~0xf)) |
							      vref_map[currrent_vref]));
						DEBUG_TRAINING_HW_ALG(
							DEBUG_LEVEL_TRACE,
							("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
							 if_id, pup,
							 (val & (~0xf)) |
							 vref_map[currrent_vref],
							 __LINE__));
					}
				} else {
					/* the window shrank - change state and step size */
					if (pup_st[pup][if_id] == VREF_STEP_1) {
						pup_st[pup][if_id] =
							VREF_STEP_2;
						lim_vref[pup][if_id] =
							current_vref[pup][if_id] -
							initial_step;
						last_valid_window[pup][if_id] =
							current_valid_window[pup][if_id];
						last_vref[pup][if_id] =
							current_vref[pup][if_id];
						current_vref[pup][if_id] =
							last_vref[pup][if_id] -
							second_step;
						/* Update the Vref for the next stage */
						CHECK_STATUS(ddr3_tip_bus_read
							     (dev_num, if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      &val));
						CHECK_STATUS(ddr3_tip_bus_write
							     (dev_num,
							      ACCESS_TYPE_UNICAST,
							      if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      (val & (~0xf)) |
							      vref_map[current_vref[pup][if_id]]));
						DEBUG_TRAINING_HW_ALG(
							DEBUG_LEVEL_TRACE,
							("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
							 if_id, pup,
							 (val & (~0xf)) |
							 vref_map[current_vref[pup][if_id]],
							 __LINE__));
					} else if (pup_st[pup][if_id] == VREF_STEP_2) {
						/*
						 * The last search point was the
						 * maximum; set that value and
						 * exit.
						 */
						CHECK_STATUS(ddr3_tip_bus_read
							     (dev_num, if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      &val));
						CHECK_STATUS(ddr3_tip_bus_write
							     (dev_num,
							      ACCESS_TYPE_UNICAST,
							      if_id,
							      ACCESS_TYPE_UNICAST, pup,
							      DDR_PHY_DATA, reg_addr,
							      (val & (~0xf)) |
							      vref_map[last_vref[pup][if_id]]));
						DEBUG_TRAINING_HW_ALG(
							DEBUG_LEVEL_TRACE,
							("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
							 if_id, pup,
							 (val & (~0xf)) |
							 vref_map[last_vref[pup][if_id]],
							 __LINE__));
						pup_st[pup][if_id] =
							VREF_CONVERGE;
						algo_run_flag++;
						interface_state[if_id]++;
						DEBUG_TRAINING_HW_ALG(
							DEBUG_LEVEL_TRACE,
							("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
							 if_id, pup,
							 current_vref[pup][if_id],
							 __LINE__));
					}
				}
			}
		}
	}
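
	/* Read back and report the Vref value finally programmed in each pup */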
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (pup = 0;
		     pup < tm->num_of_bus_per_interface; pup++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, pup);
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id,
				      ACCESS_TYPE_UNICAST, pup,
				      DDR_PHY_DATA, reg_addr, &val));
			DEBUG_TRAINING_HW_ALG(
				DEBUG_LEVEL_INFO,
				("FINAL values: I/F[ %d ], pup[ %d ] - Vref = %X (%d)\n",
				 if_id, pup, val, __LINE__));
		}
		flow_result[if_id] = TEST_SUCCESS;
	}

	/* restore start/end pattern */
	start_pattern = copy_start_pattern;
	end_pattern = copy_end_pattern;

	return 0;
}

/*
 * CK/CA Delay
 */
int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
{
	u32 if_id = 0;
	u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * ck_delay / ck_delay_16 delay the clock signal only
	 * (to overcome timing issues between CK and the command/address
	 * signals).
	 */
	/*
	 * ca_delay delays the entire command & address bus, including the
	 * clock signal (to overcome DGL error on the clock versus the DQS).
	 */
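	/*
	 * Example with hypothetical values: if adll_tap = 40 ps and
	 * ck_delay = 160 ps, then ck_num_adll_tap = 160 / 40 = 4 taps.
	 */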
	/* Calc ADLL Tap */
	if ((ck_delay == -1) || (ck_delay_16 == -1)) {
		DEBUG_TRAINING_HW_ALG(
			DEBUG_LEVEL_ERROR,
			("ERROR: One of the ck_delay values is not initialized!\n"));
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/* Calc delay ps in ADLL tap */
		if (tm->interface_params[if_id].bus_width == BUS_WIDTH_16)
			ck_num_adll_tap = ck_delay_16 / adll_tap;
		else
			ck_num_adll_tap = ck_delay / adll_tap;

		ca_num_adll_tap = ca_delay / adll_tap;
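
		/*
		 * Pack the CK delay into bits [5:0] and the CA delay into
		 * bits [15:10] of the PHY control word written below (field
		 * layout follows the shifts used here).
		 */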
		data = (ck_num_adll_tap & 0x3f) +
		       ((ca_num_adll_tap & 0x3f) << 10);

		/*
		 * Set the ADLL number to the CK ADLL for all pups of the
		 * interface.
		 */
		DEBUG_TRAINING_HW_ALG(
			DEBUG_LEVEL_TRACE,
			("ck_num_adll_tap %d ca_num_adll_tap %d adll_tap %d\n",
			 ck_num_adll_tap, ca_num_adll_tap, adll_tap));

		CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST,
						if_id, ACCESS_TYPE_MULTICAST,
						PARAM_NOT_CARE, DDR_PHY_CONTROL,
						0x0, data));
	}

	return MV_OK;
}