// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"

static u32 bist_offset = 32;
enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0;

static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id,
				   enum hws_bist_operation oper_type);
/*
 * BIST activate
 */
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
			   enum hws_access_type access_type, u32 if_num,
			   enum hws_dir dir,
			   enum hws_stress_jump addr_stress_jump,
			   enum hws_pattern_duration duration,
			   enum hws_bist_operation oper_type,
			   u32 offset, u32 cs_num, u32 pattern_addr_length)
{
	u32 tx_burst_size;
	u32 delay_between_burst;
	u32 rd_mode;
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG,
			  pattern_addr_length, MASK_ALL_BITS);
	tx_burst_size = (dir == OPER_WRITE) ?
		pattern_table[pattern].tx_burst_size : 0;
	delay_between_burst = (dir == OPER_WRITE) ? 2 : 0;
	rd_mode = (dir == OPER_WRITE) ? 1 : 0;
	ddr3_tip_configure_odpg(0, access_type, 0, dir,
				pattern_table[pattern].num_of_phases_tx,
				tx_burst_size,
				pattern_table[pattern].num_of_phases_rx,
				delay_between_burst,
				rd_mode, cs_num, addr_stress_jump, duration);
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG,
			  offset, MASK_ALL_BITS);

	if (oper_type == BIST_STOP) {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	} else {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
		if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
			return MV_FAIL;
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	}
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}
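
/*
 * Usage sketch (editor's addition, not part of the original driver):
 * run a single write-then-read BIST burst on interface 0, chip-select 0,
 * using the file-scope bist_offset and the same pattern length (15)
 * that hws_ddr3_run_bist() below passes:
 *
 *	int ret;
 *
 *	ret = ddr3_tip_bist_activate(0, PATTERN_KILLER_DQ0,
 *				     ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
 *				     STRESS_NONE, DURATION_SINGLE,
 *				     BIST_START, bist_offset, 0, 15);
 *	if (ret == MV_OK)
 *		ret = ddr3_tip_bist_activate(0, PATTERN_KILLER_DQ0,
 *					     ACCESS_TYPE_UNICAST, 0, OPER_READ,
 *					     STRESS_NONE, DURATION_SINGLE,
 *					     BIST_START, bist_offset, 0, 15);
 */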
/*
 * BIST read result
 */
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
			      struct bist_result *pst_bist_result)
{
	int ret;
	u32 read_data[MAX_INTERFACE_NUM];
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0)
		return MV_NOT_SUPPORTED;

	DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
				   ("ddr3_tip_bist_read_result if_id %d\n",
				    if_id));
	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_high = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_low = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_last_fail_addr = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_error_cnt = read_data[if_id];

	return MV_OK;
}
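
/*
 * Result-decoding sketch (editor's addition): per the register names
 * above, bist_error_cnt is the failed-word counter, bist_fail_high and
 * bist_fail_low are the high/low words of the failing read data, and
 * bist_last_fail_addr is the address of the last failure:
 *
 *	struct bist_result res;
 *
 *	if (ddr3_tip_bist_read_result(0, 0, &res) == MV_OK &&
 *	    res.bist_error_cnt != 0)
 *		printf("bist: %u errors, last at 0x%x\n",
 *		       res.bist_error_cnt, res.bist_last_fail_addr);
 */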
/*
 * BIST flow - Activate & read result
 */
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
		      u32 cs_num)
{
	int ret;
	u32 i = 0;
	u32 win_base;
	struct bist_result st_bist_result;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
		hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_WRITE, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_READ, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_read_result failed\n");
			return ret;
		}
		result[i] = st_bist_result.bist_error_cnt;
	}

	return MV_OK;
}
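
/*
 * Usage sketch (editor's addition): collect per-interface error counts
 * for one pattern on chip-select 0; MAX_INTERFACE_NUM bounds the result
 * array exactly as the loop above does:
 *
 *	u32 err_cnt[MAX_INTERFACE_NUM] = {0};
 *
 *	if (hws_ddr3_run_bist(0, PATTERN_KILLER_DQ0, err_cnt, 0) == MV_OK)
 *		ddr3_tip_print_bist_res();
 */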
/*
 * Set BIST Operation
 */
static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id, enum hws_bist_operation oper_type)
{
	if (oper_type == BIST_STOP)
		mv_ddr_odpg_disable();
	else
		mv_ddr_odpg_enable();

	return MV_OK;
}
/*
 * Print BIST result
 */
void ddr3_tip_print_bist_res(void)
{
	u32 dev_num = 0;
	u32 i;
	struct bist_result st_bist_result[MAX_INTERFACE_NUM];
	int res;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
		res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
		if (res != MV_OK) {
			DEBUG_TRAINING_BIST_ENGINE(
				DEBUG_LEVEL_ERROR,
				("ddr3_tip_bist_read_result failed\n"));
			return;
		}
	}

	DEBUG_TRAINING_BIST_ENGINE(
		DEBUG_LEVEL_INFO,
		("interface | error_cnt | fail_low | fail_high | fail_addr\n"));

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
		DEBUG_TRAINING_BIST_ENGINE(
			DEBUG_LEVEL_INFO,
			("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n",
			 i, st_bist_result[i].bist_error_cnt,
			 st_bist_result[i].bist_fail_low,
			 st_bist_result[i].bist_fail_high,
			 st_bist_result[i].bist_last_fail_addr));
	}
}
enum {
	PASS,
	FAIL
};

#define TIP_ITERATION_NUM	31
static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern,
			   u32 cs, u32 *result)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_training_ip_stat training_result;
	u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 subphy, read_data;

	ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST,
			     PARAM_NOT_CARE, RESULT_PER_BYTE,
			     HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir,
			     tm->if_act_mask, val, TIP_ITERATION_NUM, pattern,
			     EDGE_FP, CS_SINGLE, cs, &training_result);

	for (subphy = 0; subphy < max_subphy; subphy++) {
		ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[subphy],
				 &read_data, MASK_ALL_BITS);
		if (((read_data >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) ==
		    BLOCK_STATUS_NOT_LOCKED)
			*result |= (FAIL << subphy);
	}

	return MV_OK;
}
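
/*
 * Decoding sketch (editor's addition): mv_ddr_tip_bist() reports one
 * bit per subphy in *result; bit n is FAIL (1) when subphy n ended the
 * training run in the not-locked state:
 *
 *	u32 result = 0, subphy;
 *	u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
 *
 *	mv_ddr_tip_bist(OPER_READ, 0, PATTERN_KILLER_DQ0, 0, &result);
 *	for (subphy = 0; subphy < max_subphy; subphy++)
 *		if ((result >> subphy) & 0x1)
 *			printf("subphy %u not locked\n", subphy);
 */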
struct interval {
	u8 *vector;
	u8 lendpnt;		/* interval's left endpoint */
	u8 rendpnt;		/* interval's right endpoint */
	u8 size;		/* interval's size */
	u8 lmarker;		/* left marker */
	u8 rmarker;		/* right marker */
	u8 pass_lendpnt;	/* left endpoint of internal pass interval */
	u8 pass_rendpnt;	/* right endpoint of internal pass interval */
};
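
/*
 * Note (editor's addition): the interval is treated as circular.
 * interval_init() allows rmarker to exceed rendpnt by up to one full
 * interval size, and interval_proc() wraps accesses via (curr % size).
 * Example: with lendpnt = 0, rendpnt = 63 and size = 64, an rmarker of
 * 70 addresses vector[70 % 64] = vector[6], i.e. a pass window that
 * wraps past the end of the ADLL period.
 */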
static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt,
			 u8 lmarker, u8 rmarker, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	if (vector == NULL) {
		printf("%s: NULL vector pointer found\n", __func__);
		return MV_FAIL;
	}
	intrvl->vector = vector;

	if (lendpnt >= rendpnt) {
		printf("%s: incorrect lendpnt and/or rendpnt parameters found\n",
		       __func__);
		return MV_FAIL;
	}
	intrvl->lendpnt = lendpnt;
	intrvl->rendpnt = rendpnt;
	intrvl->size = rendpnt - lendpnt + 1;

	if ((lmarker < lendpnt) || (lmarker > rendpnt)) {
		printf("%s: incorrect lmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lmarker = lmarker;

	if ((rmarker < lmarker) || (rmarker > (intrvl->rendpnt + intrvl->size))) {
		printf("%s: incorrect rmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->rmarker = rmarker;

	return MV_OK;
}
static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	intrvl->pass_lendpnt = pass_lendpnt;
	intrvl->pass_rendpnt = pass_rendpnt;

	return MV_OK;
}
static int interval_proc(struct interval *intrvl)
{
	int curr;
	int pass_lendpnt, pass_rendpnt;
	int lmt;
	int fcnt = 0, pcnt = 0;

	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	/* count fails and passes */
	curr = intrvl->lendpnt;
	while (curr <= intrvl->rendpnt) {
		if (intrvl->vector[curr] == PASS)
			pcnt++;
		else
			fcnt++;
		curr++;
	}

	/* check for all fail */
	if (fcnt == intrvl->size) {
		printf("%s: no pass found\n", __func__);
		return MV_FAIL;
	}

	/* check for all pass */
	if (pcnt == intrvl->size) {
		if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK)
			return MV_FAIL;
		return MV_OK;
	}

	/* proceed with rmarker */
	curr = intrvl->rmarker;
	if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */
		/* search for fail on right */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rendpnt + intrvl->size;
		else
			lmt = intrvl->rmarker + intrvl->size - 1;

		while ((curr <= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == PASS))
			curr++;

		if (curr > lmt) { /* fail not found */
			printf("%s: rmarker: fail following pass not found\n",
			       __func__);
			return MV_FAIL;
		}
		/* fail found */
		pass_rendpnt = curr - 1;
	} else { /* fail at rmarker */
		/* search for pass on left */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rmarker - intrvl->size + 1;
		else
			lmt = intrvl->lendpnt;

		while ((curr >= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == FAIL))
			curr--;

		if (curr < lmt) { /* pass not found */
			printf("%s: rmarker: pass preceding fail not found\n",
			       __func__);
			return MV_FAIL;
		}
		/* pass found */
		pass_rendpnt = curr;
	}

	/* search for fail on left */
	curr = pass_rendpnt;
	if (pass_rendpnt > intrvl->rendpnt)
		lmt = pass_rendpnt - intrvl->size + 1;
	else
		lmt = intrvl->lendpnt;

	while ((curr >= lmt) &&
	       (intrvl->vector[curr % intrvl->size] == PASS))
		curr--;

	if (curr < lmt) { /* fail not found */
		printf("%s: lmarker: fail preceding pass not found\n", __func__);
		return MV_FAIL;
	}
	/* fail found */
	pass_lendpnt = curr + 1;

	if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
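
/*
 * Worked example (editor's addition): for an 8-entry circular vector
 * F F P P P F F F (lendpnt = 0, rendpnt = 7) with rmarker = 4 landing
 * on a PASS, the right scan stops at the FAIL at index 5, giving
 * pass_rendpnt = 4; the left scan stops at the FAIL at index 1, giving
 * pass_lendpnt = 2. interval_set() records the pass window [2, 4].
 */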
#define ADLL_TAPS_PER_PERIOD	64
int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt, u8 *vw_vector,
			     int *vw_sphy_hi_diff, int *vw_sphy_lo_diff)
{
	struct interval intrvl;

	/* init interval structure */
	if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1,
			  vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK)
		return MV_FAIL;

	/* find pass sub-interval */
	if (interval_proc(&intrvl) != MV_OK)
		return MV_FAIL;

	/* check for all pass */
	if ((intrvl.pass_rendpnt == intrvl.rendpnt) &&
	    (intrvl.pass_lendpnt == intrvl.lendpnt)) {
		printf("%s: no fail found\n", __func__);
		return MV_FAIL;
	}

	*vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt;
	*vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt;

	return MV_OK;
}
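
/*
 * Usage sketch (editor's addition): vw_vector holds one PASS/FAIL entry
 * per ADLL tap for a single subphy, and the limits are the caller's
 * reference window edges (hi_lmt/lo_lmt below are hypothetical names).
 * Given the arithmetic above, a positive *vw_sphy_hi_diff means the pass
 * window's right edge lies beyond vw_sphy_hi_lmt, and likewise on the
 * low side:
 *
 *	u8 vw[ADLL_TAPS_PER_PERIOD];
 *	int hi_diff, lo_diff;
 *
 *	if (mv_ddr_dm_to_dq_diff_get(hi_lmt, lo_lmt, vw,
 *				     &hi_diff, &lo_diff) == MV_OK)
 *		printf("dm-to-dq diff: hi %d, lo %d\n", hi_diff, lo_diff);
 */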
static int mv_ddr_bist_tx(enum hws_access_type access_type)
{
	mv_ddr_odpg_done_clr();

	ddr3_tip_bist_operation(0, access_type, 0, BIST_START);

	if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
		return MV_FAIL;

	ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}
/* prepare odpg for bist operation */
#define WR_OP_ODPG_DATA_CMD_BURST_DLY	2
static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern,
				    enum hws_access_type access_type,
				    enum hws_dir dir,
				    enum hws_stress_jump stress_jump_addr,
				    enum hws_pattern_duration duration,
				    u32 offset, u32 cs,
				    u32 pattern_addr_len, enum dm_direction dm_dir)
{
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 tx_burst_size;
	u32 burst_delay;
	u32 rd_mode;

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	if (pattern == PATTERN_00 || pattern == PATTERN_FF)
		ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
	else
		mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG,
			  pattern_addr_len, MASK_ALL_BITS);
	if (dir == OPER_WRITE) {
		tx_burst_size = pattern_table[pattern].tx_burst_size;
		burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY;
		rd_mode = ODPG_MODE_TX;
	} else {
		tx_burst_size = 0;
		burst_delay = 0;
		rd_mode = ODPG_MODE_RX;
	}
	ddr3_tip_configure_odpg(0, access_type, 0, dir,
				pattern_table[pattern].num_of_phases_tx,
				tx_burst_size,
				pattern_table[pattern].num_of_phases_rx,
				burst_delay, rd_mode, cs, stress_jump_addr,
				duration);

	return MV_OK;
}
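
/*
 * Note (editor's addition, an interpretation): PATTERN_00 and PATTERN_FF
 * are uniform all-zeros/all-ones data, so the generic ODPG pattern loader
 * suffices; any other pattern goes through the DM-aware loader so the
 * data-mask polarity (DM_DIR_DIRECT vs DM_DIR_INVERSE) is loaded with it.
 */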
#define BYTES_PER_BURST_64BIT	0x20
#define BYTES_PER_BURST_32BIT	0x10
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 adll_tap;
	u32 wr_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 rd_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 subphy;
	u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 odpg_addr = 0x0;
	u32 result;
	u32 idx;
	/* burst length in bytes */
	u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ?
			 BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT);

	/* save dqs values to restore after algorithm's run */
	ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS);
	ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS);

	/* fill memory with base pattern */
	ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0,
			  MASK_ALL_BITS);
	mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE,
				 STRESS_NONE, DURATION_SINGLE, bist_offset,
				 cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT :
							   DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0,
				  ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);

		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE,
					DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* fill memory with vref pattern to increment addr using odpg bist */
	mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE,
				 STRESS_NONE, DURATION_SINGLE, bist_offset,
				 cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT :
							   DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_MULTICAST, 0, DDR_PHY_DATA,
				   CTX_PHY_REG(cs), adll_tap);
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0,
				  ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE,
					DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* restore subphy's tx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_UNICAST, subphy, DDR_PHY_DATA,
				   CTX_PHY_REG(cs), wr_ctrl_adll[subphy]);
	}

	/* read and validate bist (comparing with the base pattern) */
	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		result = 0;
		odpg_addr = adll_tap * burst_len;
		/* change addr to fit write */
		mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr);
		mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result);
		for (subphy = 0; subphy < subphy_max; subphy++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
			idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap;
			vw_vector[idx] |= ((result >> subphy) & 0x1);
		}
	}

	/* restore subphy's rx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_UNICAST, subphy, DDR_PHY_DATA,
				   CRX_PHY_REG(cs), rd_ctrl_adll[subphy]);
	}

	return MV_OK;
}
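
/*
 * Usage sketch (editor's addition): build the DM valid-window vector for
 * chip-select 0 and hand one subphy's 64-tap slice to
 * mv_ddr_dm_to_dq_diff_get(). Entries left at 0 mean PASS, matching the
 * OR-ing of FAIL bits above, so the vector must be zeroed first
 * (hi_lmt, lo_lmt and subphy below are hypothetical caller values):
 *
 *	u8 vw_vector[MAX_BUS_NUM * ADLL_TAPS_PER_PERIOD] = {0};
 *	int hi_diff, lo_diff;
 *
 *	if (mv_ddr_dm_vw_get(PATTERN_00, 0, vw_vector) == MV_OK)
 *		mv_ddr_dm_to_dq_diff_get(hi_lmt, lo_lmt,
 *					 &vw_vector[subphy * ADLL_TAPS_PER_PERIOD],
 *					 &hi_diff, &lo_diff);
 */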