/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

static u32 bist_offset = 32;
enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0;

static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id,
				   enum hws_bist_operation oper_type);

/*
 * BIST activate
 */
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
			   enum hws_access_type access_type, u32 if_num,
			   enum hws_dir direction,
			   enum hws_stress_jump addr_stress_jump,
			   enum hws_pattern_duration duration,
			   enum hws_bist_operation oper_type,
			   u32 offset, u32 cs_num, u32 pattern_addr_length)
{
	u32 tx_burst_size;
	u32 delay_between_burst;
	u32 rd_mode, val;
	u32 poll_cnt = 0, max_poll = 1000, i, start_if, end_if;
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 read_data[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* ODPG Write enable from BIST */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
				       ODPG_DATA_CONTROL_REG, 0x1, 0x1));
	/* ODPG Read enable/disable from BIST */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
				       ODPG_DATA_CONTROL_REG,
				       (direction == OPER_READ) ?
				       0x2 : 0, 0x2));
	CHECK_STATUS(ddr3_tip_load_pattern_to_odpg(dev_num, access_type, if_num,
						   pattern, offset));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
				       ODPG_DATA_BUF_SIZE_REG,
				       pattern_addr_length, MASK_ALL_BITS));
	tx_burst_size = (direction == OPER_WRITE) ?
		pattern_table[pattern].tx_burst_size : 0;
	delay_between_burst = (direction == OPER_WRITE) ? 2 : 0;
	rd_mode = (direction == OPER_WRITE) ? 1 : 0;
	CHECK_STATUS(ddr3_tip_configure_odpg
		     (dev_num, access_type, if_num, direction,
		      pattern_table[pattern].num_of_phases_tx, tx_burst_size,
		      pattern_table[pattern].num_of_phases_rx,
		      delay_between_burst,
		      rd_mode, cs_num, addr_stress_jump, duration));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
				       ODPG_PATTERN_ADDR_OFFSET_REG,
				       offset, MASK_ALL_BITS));
	if (oper_type == BIST_STOP) {
		CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
						     if_num, BIST_STOP));
	} else {
		CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
						     if_num, BIST_START));
		if (duration != DURATION_CONT) {
			/*
			 * This delay is a workaround: polling may report
			 * "done" even though the ODPG has not yet finished
			 * its task.
			 */
			if (access_type == ACCESS_TYPE_MULTICAST) {
				start_if = 0;
				end_if = MAX_INTERFACE_NUM - 1;
			} else {
				start_if = if_num;
				end_if = if_num;
			}

			for (i = start_if; i <= end_if; i++) {
				VALIDATE_ACTIVE(tm->if_act_mask, i);

				for (poll_cnt = 0; poll_cnt < max_poll;
				     poll_cnt++) {
					CHECK_STATUS(ddr3_tip_if_read
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_num, ODPG_BIST_DONE,
						      read_data,
						      MASK_ALL_BITS));
					val = read_data[i];
					if ((val & 0x1) == 0x0) {
						/*
						 * In SoC-type devices this
						 * bit is self-clearing, so
						 * if it has been cleared,
						 * all is well.
						 */
						break;
					}
				}

				if (poll_cnt >= max_poll) {
					DEBUG_TRAINING_BIST_ENGINE
						(DEBUG_LEVEL_ERROR,
						 ("Bist poll failure 2\n"));
					CHECK_STATUS(ddr3_tip_if_write
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_num,
						      ODPG_DATA_CONTROL_REG, 0,
						      MASK_ALL_BITS));
					return MV_FAIL;
				}
			}

			CHECK_STATUS(ddr3_tip_bist_operation
				     (dev_num, access_type, if_num, BIST_STOP));
		}
	}

	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
				       ODPG_DATA_CONTROL_REG, 0,
				       MASK_ALL_BITS));

	return MV_OK;
}
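
/*
 * Illustrative sketch (not part of the original driver): starting a
 * continuous-duration BIST run on a single interface and stopping it later
 * with an explicit BIST_STOP call, i.e. the two paths of
 * ddr3_tip_bist_activate() that hws_ddr3_run_bist() below does not exercise.
 * The function name and the interface/CS arguments are hypothetical; the
 * enum values come from this file and ddr3_init.h.
 */
#if 0
static int example_continuous_bist(u32 dev_num, u32 if_id, u32 cs_num)
{
	int ret;

	/* Kick off a continuous write BIST with the default sweep pattern */
	ret = ddr3_tip_bist_activate(dev_num, sweep_pattern,
				     ACCESS_TYPE_UNICAST, if_id,
				     OPER_WRITE, STRESS_NONE,
				     DURATION_CONT, BIST_START,
				     bist_offset, cs_num, 15);
	if (ret != MV_OK)
		return ret;

	/* ... BIST keeps running here; no completion polling is done ... */

	/* Stop the engine; only the BIST_STOP branch is taken in this call */
	return ddr3_tip_bist_activate(dev_num, sweep_pattern,
				      ACCESS_TYPE_UNICAST, if_id,
				      OPER_WRITE, STRESS_NONE,
				      DURATION_CONT, BIST_STOP,
				      bist_offset, cs_num, 15);
}
#endif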

/*
 * BIST read result
 */
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
			      struct bist_result *pst_bist_result)
{
	int ret;
	u32 read_data[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
		return MV_NOT_SUPPORTED;

	DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
				   ("ddr3_tip_bist_read_result if_id %d\n",
				    if_id));
	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_BIST_FAILED_DATA_HI_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_high = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_BIST_FAILED_DATA_LOW_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_low = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_BIST_LAST_FAIL_ADDR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_last_fail_addr = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_BIST_DATA_ERROR_COUNTER_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_error_cnt = read_data[if_id];

	return MV_OK;
}

/*
 * BIST flow - Activate & read result
 */
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
		      u32 cs_num)
{
	int ret;
	u32 i = 0;
	u32 win_base;
	struct bist_result st_bist_result;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_ACTIVE(tm->if_act_mask, i);
		hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_WRITE, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_READ, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_read_result failed\n");
			return ret;
		}
		result[i] = st_bist_result.bist_error_cnt;
	}

	return MV_OK;
}
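
/*
 * Illustrative sketch (not part of the original driver): running the default
 * sweep pattern on chip-select 0 via hws_ddr3_run_bist() and reporting any
 * interface whose BIST error counter is non-zero. The function name is
 * hypothetical; everything it calls is defined in this file.
 */
#if 0
static int example_report_bist_cs0(u32 dev_num)
{
	u32 result[MAX_INTERFACE_NUM] = { 0 };
	u32 i;
	int ret;

	/* Per-interface error counters are returned in result[] */
	ret = hws_ddr3_run_bist(dev_num, sweep_pattern, result, 0);
	if (ret != MV_OK)
		return ret;

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		if (result[i] != 0)
			printf("BIST errors on interface %d: 0x%x\n",
			       i, result[i]);
	}

	return MV_OK;
}
#endif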

/*
 * Set BIST Operation
 */
static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id, enum hws_bist_operation oper_type)
{
	if (oper_type == BIST_STOP) {
		/* Stop is requested through bit 8 of ODPG_BIST_DONE */
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
					       ODPG_BIST_DONE, 1 << 8, 1 << 8));
	} else {
		/* Start is requested through bit 0 of ODPG_BIST_DONE */
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
					       ODPG_BIST_DONE, 1, 1));
	}

	return MV_OK;
}

/*
 * Print BIST result
 */
void ddr3_tip_print_bist_res(void)
{
	u32 dev_num = 0;
	u32 i;
	struct bist_result st_bist_result[MAX_INTERFACE_NUM];
	int res;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		if (IS_ACTIVE(tm->if_act_mask, i) == 0)
			continue;

		res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
		if (res != MV_OK) {
			DEBUG_TRAINING_BIST_ENGINE(
				DEBUG_LEVEL_ERROR,
				("ddr3_tip_bist_read_result failed\n"));
			return;
		}
	}

	DEBUG_TRAINING_BIST_ENGINE(
		DEBUG_LEVEL_INFO,
		("interface | error_cnt | fail_low | fail_high | fail_addr\n"));

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		if (IS_ACTIVE(tm->if_act_mask, i) == 0)
			continue;

		DEBUG_TRAINING_BIST_ENGINE(
			DEBUG_LEVEL_INFO,
			("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n",
			 i, st_bist_result[i].bist_error_cnt,
			 st_bist_result[i].bist_fail_low,
			 st_bist_result[i].bist_fail_high,
			 st_bist_result[i].bist_last_fail_addr));
	}
}