/* ddr3_debug.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) Marvell International Ltd. and its affiliates
  4. */
  5. #include "ddr3_init.h"
/* When non-zero, dump Dunit/PHY registers (see ddr3_tip_reg_dump()). */
u8 is_reg_dump = 0;
/* Log level for the PBS training block; errors only by default. */
u8 debug_pbs = DEBUG_LEVEL_ERROR;
  8. /*
  9. * API to change flags outside of the lib
  10. */
#if defined(SILENT_LIB)
/*
 * Stub: SILENT_LIB builds compile out all log-level control, so the
 * request is silently ignored and callers need no conditional code.
 */
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
{
	/* do nothing */
}
#else /* SILENT_LIB */
/* Per-module debug log levels for the training library; all default to
 * errors-only and are adjusted via ddr3_hws_set_log_level().
 */
u8 debug_training_static = DEBUG_LEVEL_ERROR;
u8 debug_training = DEBUG_LEVEL_ERROR;
u8 debug_leveling = DEBUG_LEVEL_ERROR;
u8 debug_centralization = DEBUG_LEVEL_ERROR;
u8 debug_training_ip = DEBUG_LEVEL_ERROR;
u8 debug_training_bist = DEBUG_LEVEL_ERROR;
u8 debug_training_hw_alg = DEBUG_LEVEL_ERROR;
u8 debug_training_access = DEBUG_LEVEL_ERROR;
u8 debug_training_device = DEBUG_LEVEL_ERROR;
  27. void mv_ddr_user_log_level_set(enum ddr_lib_debug_block block)
  28. {
  29. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  30. ddr3_hws_set_log_level(block, tm->debug_level);
  31. };
  32. void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
  33. {
  34. switch (block) {
  35. case DEBUG_BLOCK_STATIC:
  36. debug_training_static = level;
  37. break;
  38. case DEBUG_BLOCK_TRAINING_MAIN:
  39. debug_training = level;
  40. break;
  41. case DEBUG_BLOCK_LEVELING:
  42. debug_leveling = level;
  43. break;
  44. case DEBUG_BLOCK_CENTRALIZATION:
  45. debug_centralization = level;
  46. break;
  47. case DEBUG_BLOCK_PBS:
  48. debug_pbs = level;
  49. break;
  50. case DEBUG_BLOCK_ALG:
  51. debug_training_hw_alg = level;
  52. break;
  53. case DEBUG_BLOCK_DEVICE:
  54. debug_training_device = level;
  55. break;
  56. case DEBUG_BLOCK_ACCESS:
  57. debug_training_access = level;
  58. break;
  59. case DEBUG_STAGES_REG_DUMP:
  60. if (level == DEBUG_LEVEL_TRACE)
  61. is_reg_dump = 1;
  62. else
  63. is_reg_dump = 0;
  64. break;
  65. case DEBUG_BLOCK_ALL:
  66. default:
  67. debug_training_static = level;
  68. debug_training = level;
  69. debug_leveling = level;
  70. debug_centralization = level;
  71. debug_pbs = level;
  72. debug_training_hw_alg = level;
  73. debug_training_access = level;
  74. debug_training_device = level;
  75. }
  76. }
  77. #endif /* SILENT_LIB */
#if defined(DDR_VIEWER_TOOL)
static char *convert_freq(enum hws_ddr_freq freq);
/* NOTE(review): guard is defined(EXCLUDE_SWITCH_DEBUG); the switch-debug
 * section near the end of the file defines same-named arrays under the
 * opposite guard, so exactly one set exists — confirm this is intended.
 */
#if defined(EXCLUDE_SWITCH_DEBUG)
u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
u32 ctrl_adll1[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
u32 ctrl_level_phase[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
#endif /* EXCLUDE_SWITCH_DEBUG */
#endif /* DDR_VIEWER_TOOL */
/* Per-device register-access callback table (see ddr3_tip_init_config_func). */
struct hws_tip_config_func_db config_func_info[MAX_DEVICE_NUM];
u8 is_default_centralization = 0;
u8 is_tune_result = 0;
/* Enable per-interface / per-pup validation sweeps in ddr3_tip_print_log(). */
u8 is_validate_window_per_if = 0;
u8 is_validate_window_per_pup = 0;
u8 sweep_cnt = 1;
u32 is_bist_reset_bit = 1;
u8 is_run_leveling_sweep_tests;
/* Per-device XSB info registered via ddr3_tip_register_xsb_info(). */
static struct hws_xsb_info xsb_info[MAX_DEVICE_NUM];
  96. /*
  97. * Dump Dunit & Phy registers
  98. */
  99. int ddr3_tip_reg_dump(u32 dev_num)
  100. {
  101. u32 if_id, reg_addr, data_value, bus_id;
  102. u32 read_data[MAX_INTERFACE_NUM];
  103. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  104. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  105. printf("-- dunit registers --\n");
  106. for (reg_addr = 0x1400; reg_addr < 0x19f0; reg_addr += 4) {
  107. printf("0x%x ", reg_addr);
  108. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  109. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  110. CHECK_STATUS(ddr3_tip_if_read
  111. (dev_num, ACCESS_TYPE_UNICAST,
  112. if_id, reg_addr, read_data,
  113. MASK_ALL_BITS));
  114. printf("0x%x ", read_data[if_id]);
  115. }
  116. printf("\n");
  117. }
  118. printf("-- Phy registers --\n");
  119. for (reg_addr = 0; reg_addr <= 0xff; reg_addr++) {
  120. printf("0x%x ", reg_addr);
  121. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  122. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  123. for (bus_id = 0;
  124. bus_id < octets_per_if_num;
  125. bus_id++) {
  126. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  127. CHECK_STATUS(ddr3_tip_bus_read
  128. (dev_num, if_id,
  129. ACCESS_TYPE_UNICAST, bus_id,
  130. DDR_PHY_DATA, reg_addr,
  131. &data_value));
  132. printf("0x%x ", data_value);
  133. }
  134. for (bus_id = 0;
  135. bus_id < octets_per_if_num;
  136. bus_id++) {
  137. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  138. CHECK_STATUS(ddr3_tip_bus_read
  139. (dev_num, if_id,
  140. ACCESS_TYPE_UNICAST, bus_id,
  141. DDR_PHY_CONTROL, reg_addr,
  142. &data_value));
  143. printf("0x%x ", data_value);
  144. }
  145. }
  146. printf("\n");
  147. }
  148. return MV_OK;
  149. }
  150. /*
  151. * Register access func registration
  152. */
  153. int ddr3_tip_init_config_func(u32 dev_num,
  154. struct hws_tip_config_func_db *config_func)
  155. {
  156. if (config_func == NULL)
  157. return MV_BAD_PARAM;
  158. memcpy(&config_func_info[dev_num], config_func,
  159. sizeof(struct hws_tip_config_func_db));
  160. return MV_OK;
  161. }
  162. /*
  163. * Read training result table
  164. */
  165. int hws_ddr3_tip_read_training_result(
  166. u32 dev_num, enum hws_result result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM])
  167. {
  168. if (result == NULL)
  169. return MV_BAD_PARAM;
  170. memcpy(result, training_result,
  171. sizeof(enum hws_result) *
  172. MAX_STAGE_LIMIT *
  173. MAX_INTERFACE_NUM);
  174. return MV_OK;
  175. }
  176. /*
  177. * Get training result info pointer
  178. */
  179. enum hws_result *ddr3_tip_get_result_ptr(u32 stage)
  180. {
  181. return training_result[stage];
  182. }
  183. /*
  184. * Device info read
  185. */
  186. int ddr3_tip_get_device_info(u32 dev_num, struct ddr3_device_info *info_ptr)
  187. {
  188. if (config_func_info[dev_num].tip_get_device_info_func != NULL) {
  189. return config_func_info[dev_num].
  190. tip_get_device_info_func((u8) dev_num, info_ptr);
  191. }
  192. return MV_FAIL;
  193. }
  194. #if defined(DDR_VIEWER_TOOL)
  195. /*
  196. * Convert freq to character string
  197. */
  198. static char *convert_freq(enum hws_ddr_freq freq)
  199. {
  200. switch (freq) {
  201. case DDR_FREQ_LOW_FREQ:
  202. return "DDR_FREQ_LOW_FREQ";
  203. case DDR_FREQ_400:
  204. return "400";
  205. case DDR_FREQ_533:
  206. return "533";
  207. case DDR_FREQ_667:
  208. return "667";
  209. case DDR_FREQ_800:
  210. return "800";
  211. case DDR_FREQ_933:
  212. return "933";
  213. case DDR_FREQ_1066:
  214. return "1066";
  215. case DDR_FREQ_311:
  216. return "311";
  217. case DDR_FREQ_333:
  218. return "333";
  219. case DDR_FREQ_467:
  220. return "467";
  221. case DDR_FREQ_850:
  222. return "850";
  223. case DDR_FREQ_900:
  224. return "900";
  225. case DDR_FREQ_360:
  226. return "DDR_FREQ_360";
  227. case DDR_FREQ_1000:
  228. return "DDR_FREQ_1000";
  229. default:
  230. return "Unknown Frequency";
  231. }
  232. }
  233. /*
  234. * Convert device ID to character string
  235. */
  236. static char *convert_dev_id(u32 dev_id)
  237. {
  238. switch (dev_id) {
  239. case 0x6800:
  240. return "A38xx";
  241. case 0x6900:
  242. return "A39XX";
  243. case 0xf400:
  244. return "AC3";
  245. case 0xfc00:
  246. return "BC2";
  247. default:
  248. return "Unknown Device";
  249. }
  250. }
  251. /*
  252. * Convert device ID to character string
  253. */
  254. static char *convert_mem_size(u32 dev_id)
  255. {
  256. switch (dev_id) {
  257. case 0:
  258. return "512 MB";
  259. case 1:
  260. return "1 GB";
  261. case 2:
  262. return "2 GB";
  263. case 3:
  264. return "4 GB";
  265. case 4:
  266. return "8 GB";
  267. default:
  268. return "wrong mem size";
  269. }
  270. }
  271. int print_device_info(u8 dev_num)
  272. {
  273. struct ddr3_device_info info_ptr;
  274. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  275. CHECK_STATUS(ddr3_tip_get_device_info(dev_num, &info_ptr));
  276. printf("=== DDR setup START===\n");
  277. printf("\tDevice ID: %s\n", convert_dev_id(info_ptr.device_id));
  278. printf("\tDDR3 CK delay: %d\n", info_ptr.ck_delay);
  279. print_topology(tm);
  280. printf("=== DDR setup END===\n");
  281. return MV_OK;
  282. }
  283. void hws_ddr3_tip_sweep_test(int enable)
  284. {
  285. if (enable) {
  286. is_validate_window_per_if = 1;
  287. is_validate_window_per_pup = 1;
  288. debug_training = DEBUG_LEVEL_TRACE;
  289. } else {
  290. is_validate_window_per_if = 0;
  291. is_validate_window_per_pup = 0;
  292. }
  293. }
  294. #endif /* DDR_VIEWER_TOOL */
  295. char *ddr3_tip_convert_tune_result(enum hws_result tune_result)
  296. {
  297. switch (tune_result) {
  298. case TEST_FAILED:
  299. return "FAILED";
  300. case TEST_SUCCESS:
  301. return "PASS";
  302. case NO_TEST_DONE:
  303. return "NOT COMPLETED";
  304. default:
  305. return "Un-KNOWN";
  306. }
  307. }
  308. /*
  309. * Print log info
  310. */
  311. int ddr3_tip_print_log(u32 dev_num, u32 mem_addr)
  312. {
  313. u32 if_id = 0;
  314. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  315. #if defined(DDR_VIEWER_TOOL)
  316. if ((is_validate_window_per_if != 0) ||
  317. (is_validate_window_per_pup != 0)) {
  318. u32 is_pup_log = 0;
  319. enum hws_ddr_freq freq;
  320. freq = tm->interface_params[first_active_if].memory_freq;
  321. is_pup_log = (is_validate_window_per_pup != 0) ? 1 : 0;
  322. printf("===VALIDATE WINDOW LOG START===\n");
  323. printf("DDR Frequency: %s ======\n", convert_freq(freq));
  324. /* print sweep windows */
  325. ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 1, is_pup_log);
  326. ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 0, is_pup_log);
  327. #if defined(EXCLUDE_SWITCH_DEBUG)
  328. if (is_run_leveling_sweep_tests == 1) {
  329. ddr3_tip_run_leveling_sweep_test(dev_num, sweep_cnt, 0, is_pup_log);
  330. ddr3_tip_run_leveling_sweep_test(dev_num, sweep_cnt, 1, is_pup_log);
  331. }
  332. #endif /* EXCLUDE_SWITCH_DEBUG */
  333. ddr3_tip_print_all_pbs_result(dev_num);
  334. ddr3_tip_print_wl_supp_result(dev_num);
  335. printf("===VALIDATE WINDOW LOG END ===\n");
  336. CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));
  337. ddr3_tip_reg_dump(dev_num);
  338. }
  339. #endif /* DDR_VIEWER_TOOL */
  340. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  341. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  342. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  343. ("IF %d Status:\n", if_id));
  344. if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
  345. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  346. ("\tInit Controller: %s\n",
  347. ddr3_tip_convert_tune_result
  348. (training_result[INIT_CONTROLLER]
  349. [if_id])));
  350. }
  351. if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
  352. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  353. ("\tLow freq Config: %s\n",
  354. ddr3_tip_convert_tune_result
  355. (training_result[SET_LOW_FREQ]
  356. [if_id])));
  357. }
  358. if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
  359. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  360. ("\tLoad Pattern: %s\n",
  361. ddr3_tip_convert_tune_result
  362. (training_result[LOAD_PATTERN]
  363. [if_id])));
  364. }
  365. if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
  366. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  367. ("\tMedium freq Config: %s\n",
  368. ddr3_tip_convert_tune_result
  369. (training_result[SET_MEDIUM_FREQ]
  370. [if_id])));
  371. }
  372. if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
  373. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  374. ("\tWL: %s\n",
  375. ddr3_tip_convert_tune_result
  376. (training_result[WRITE_LEVELING]
  377. [if_id])));
  378. }
  379. if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
  380. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  381. ("\tLoad Pattern: %s\n",
  382. ddr3_tip_convert_tune_result
  383. (training_result[LOAD_PATTERN_2]
  384. [if_id])));
  385. }
  386. if (mask_tune_func & READ_LEVELING_MASK_BIT) {
  387. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  388. ("\tRL: %s\n",
  389. ddr3_tip_convert_tune_result
  390. (training_result[READ_LEVELING]
  391. [if_id])));
  392. }
  393. if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
  394. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  395. ("\tWL Supp: %s\n",
  396. ddr3_tip_convert_tune_result
  397. (training_result[WRITE_LEVELING_SUPP]
  398. [if_id])));
  399. }
  400. if (mask_tune_func & PBS_RX_MASK_BIT) {
  401. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  402. ("\tPBS RX: %s\n",
  403. ddr3_tip_convert_tune_result
  404. (training_result[PBS_RX]
  405. [if_id])));
  406. }
  407. if (mask_tune_func & PBS_TX_MASK_BIT) {
  408. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  409. ("\tPBS TX: %s\n",
  410. ddr3_tip_convert_tune_result
  411. (training_result[PBS_TX]
  412. [if_id])));
  413. }
  414. if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
  415. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  416. ("\tTarget freq Config: %s\n",
  417. ddr3_tip_convert_tune_result
  418. (training_result[SET_TARGET_FREQ]
  419. [if_id])));
  420. }
  421. if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
  422. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  423. ("\tWL TF: %s\n",
  424. ddr3_tip_convert_tune_result
  425. (training_result[WRITE_LEVELING_TF]
  426. [if_id])));
  427. }
  428. if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
  429. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  430. ("\tRL TF: %s\n",
  431. ddr3_tip_convert_tune_result
  432. (training_result[READ_LEVELING_TF]
  433. [if_id])));
  434. }
  435. if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
  436. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  437. ("\tWL TF Supp: %s\n",
  438. ddr3_tip_convert_tune_result
  439. (training_result
  440. [WRITE_LEVELING_SUPP_TF]
  441. [if_id])));
  442. }
  443. if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
  444. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  445. ("\tCentr RX: %s\n",
  446. ddr3_tip_convert_tune_result
  447. (training_result[CENTRALIZATION_RX]
  448. [if_id])));
  449. }
  450. if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
  451. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  452. ("\tVREF_CALIBRATION: %s\n",
  453. ddr3_tip_convert_tune_result
  454. (training_result[VREF_CALIBRATION]
  455. [if_id])));
  456. }
  457. if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
  458. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  459. ("\tCentr TX: %s\n",
  460. ddr3_tip_convert_tune_result
  461. (training_result[CENTRALIZATION_TX]
  462. [if_id])));
  463. }
  464. }
  465. return MV_OK;
  466. }
  467. #if !defined(EXCLUDE_DEBUG_PRINTS)
  468. /*
  469. * Print stability log info
  470. */
/*
 * Print a CSV-style stability report: a title line per active interface,
 * then a data line with temperature, calibration values and, per CS and
 * per octet, the WL/RL/centralization/Vref/PBS register decodes.
 */
int ddr3_tip_print_stability_log(u32 dev_num)
{
	u8 if_id = 0, csindex = 0, bus_id = 0, idx = 0;
	u32 reg_data;
	u32 read_data[MAX_INTERFACE_NUM];
	u32 max_cs = ddr3_tip_max_cs_get(dev_num);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Title print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
		printf("Title: I/F# , Tj, Calibration_n0, Calibration_p0, Calibration_n1, Calibration_p1, Calibration_n2, Calibration_p2,");
		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			printf("\n");
			/* NOTE(review): bus_id is still 0 here (it is only
			 * advanced in the data loop below) — confirm this
			 * bus-active check is intentional for the title.
			 */
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
			printf("VWTx, VWRx, WL_tot, WL_ADLL, WL_PH, RL_Tot, RL_ADLL, RL_PH, RL_Smp, Cen_tx, Cen_rx, Vref, DQVref,");
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSTx-Pad%d,", idx);
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSRx-Pad%d,", idx);
		}
	}
	printf("\n");

	/* Data print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		/* temperature via registered callback, 0 when none */
		printf("Data: %d,%d,", if_id,
		       (config_func_info[dev_num].tip_get_temperature != NULL)
		       ? (config_func_info[dev_num].
			  tip_get_temperature(dev_num)) : (0));

		/* calibration register pairs (n/p fields) */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1dc8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0000) >> 16),
		       ((read_data[if_id] & 0xfc00000) >> 22));

		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			for (bus_id = 0; bus_id < MAX_BUS_NUM; bus_id++) {
				printf("\n");
				VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
				/* VW Tx/Rx result register */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  RESULT_PHY_REG +
						  csindex, &reg_data);
				printf("%d,%d,", (reg_data & 0x1f),
				       ((reg_data & 0x3e0) >> 5));
				/* WL */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  WL_PHY_REG(csindex),
						  &reg_data);
				printf("%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32,
				       (reg_data & 0x1f),
				       (reg_data & 0x1c0) >> 6);
				/* RL */
				CHECK_STATUS(ddr3_tip_if_read
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id,
					      RD_DATA_SMPL_DLYS_REG,
					      read_data, MASK_ALL_BITS));
				/* isolate this CS's 5-bit sample delay */
				read_data[if_id] =
					(read_data[if_id] &
					 (0x1f << (8 * csindex))) >>
					(8 * csindex);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  RL_PHY_REG(csindex),
						  &reg_data);
				printf("%d,%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32 +
				       read_data[if_id] * 64,
				       (reg_data & 0x1f),
				       ((reg_data & 0x1c0) >> 6),
				       read_data[if_id]);
				/* Centralization */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  CTX_PHY_REG(csindex),
						  &reg_data);
				printf("%d,", (reg_data & 0x3f));
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  CRX_PHY_REG(csindex),
						  &reg_data);
				printf("%d,", (reg_data & 0x1f));
				/* Vref */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  PAD_CFG_PHY_REG,
						  &reg_data);
				printf("%d,", (reg_data & 0x7));
				/* DQVref */
				/* Need to add the Read Function from device */
				printf("%d,", 0);
				printf("\t\t");
				/* PBS Tx per-pad values */
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x10 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
				printf("\t\t");
				/* PBS Rx per-pad values */
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x50 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
			}
		}
	}
	printf("\n");

	return MV_OK;
}
  612. #endif /* EXCLUDE_DEBUG_PRINTS */
  613. /*
  614. * Register XSB information
  615. */
  616. int ddr3_tip_register_xsb_info(u32 dev_num, struct hws_xsb_info *xsb_info_table)
  617. {
  618. memcpy(&xsb_info[dev_num], xsb_info_table, sizeof(struct hws_xsb_info));
  619. return MV_OK;
  620. }
  621. /*
  622. * Read ADLL Value
  623. */
  624. int ddr3_tip_read_adll_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  625. u32 reg_addr, u32 mask)
  626. {
  627. u32 data_value;
  628. u32 if_id = 0, bus_id = 0;
  629. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  630. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  631. /*
  632. * multi CS support - reg_addr is calucalated in calling function
  633. * with CS offset
  634. */
  635. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  636. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  637. for (bus_id = 0; bus_id < octets_per_if_num;
  638. bus_id++) {
  639. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  640. CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
  641. ACCESS_TYPE_UNICAST,
  642. bus_id,
  643. DDR_PHY_DATA, reg_addr,
  644. &data_value));
  645. pup_values[if_id *
  646. octets_per_if_num + bus_id] =
  647. data_value & mask;
  648. }
  649. }
  650. return 0;
  651. }
  652. /*
  653. * Write ADLL Value
  654. */
  655. int ddr3_tip_write_adll_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  656. u32 reg_addr)
  657. {
  658. u32 if_id = 0, bus_id = 0;
  659. u32 data;
  660. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  661. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  662. /*
  663. * multi CS support - reg_addr is calucalated in calling function
  664. * with CS offset
  665. */
  666. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  667. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  668. for (bus_id = 0; bus_id < octets_per_if_num;
  669. bus_id++) {
  670. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  671. data = pup_values[if_id *
  672. octets_per_if_num +
  673. bus_id];
  674. CHECK_STATUS(ddr3_tip_bus_write(dev_num,
  675. ACCESS_TYPE_UNICAST,
  676. if_id,
  677. ACCESS_TYPE_UNICAST,
  678. bus_id, DDR_PHY_DATA,
  679. reg_addr, data));
  680. }
  681. }
  682. return 0;
  683. }
  684. /**
  685. * Read Phase Value
  686. */
  687. int read_phase_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  688. int reg_addr, u32 mask)
  689. {
  690. u32 data_value;
  691. u32 if_id = 0, bus_id = 0;
  692. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  693. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  694. /* multi CS support - reg_addr is calucalated in calling function with CS offset */
  695. for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
  696. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  697. for (bus_id = 0; bus_id < octets_per_if_num; bus_id++) {
  698. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  699. CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
  700. ACCESS_TYPE_UNICAST,
  701. bus_id,
  702. DDR_PHY_DATA, reg_addr,
  703. &data_value));
  704. pup_values[if_id * octets_per_if_num + bus_id] = data_value & mask;
  705. }
  706. }
  707. return 0;
  708. }
  709. /**
  710. * Write Leveling Value
  711. */
  712. int write_leveling_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  713. u32 pup_ph_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], int reg_addr)
  714. {
  715. u32 if_id = 0, bus_id = 0;
  716. u32 data;
  717. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  718. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  719. /* multi CS support - reg_addr is calucalated in calling function with CS offset */
  720. for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
  721. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  722. for (bus_id = 0 ; bus_id < octets_per_if_num ; bus_id++) {
  723. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
  724. data = pup_values[if_id * octets_per_if_num + bus_id] +
  725. pup_ph_values[if_id * octets_per_if_num + bus_id];
  726. CHECK_STATUS(ddr3_tip_bus_write(dev_num,
  727. ACCESS_TYPE_UNICAST,
  728. if_id,
  729. ACCESS_TYPE_UNICAST,
  730. bus_id,
  731. DDR_PHY_DATA,
  732. reg_addr,
  733. data));
  734. }
  735. }
  736. return 0;
  737. }
#if !defined(EXCLUDE_SWITCH_DEBUG)
/* NOTE(review): config_func_info is already (tentatively) defined earlier
 * in this file outside any guard; this duplicate definition relies on the
 * common-symbol extension — confirm whether it can be dropped.
 */
struct hws_tip_config_func_db config_func_info[MAX_DEVICE_NUM];
u32 start_xsb_offset = 0;
u8 is_rl_old = 0;
u8 is_freq_old = 0;
u8 is_dfs_disabled = 0;
u32 default_centrlization_value = 0x12;
u32 activate_select_before_run_alg = 1, activate_deselect_after_run_alg = 1,
	rl_test = 0, reset_read_fifo = 0;
int debug_acc = 0;
u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
/* XSB data patterns: counting nibbles, checkerboards and walking values. */
u32 xsb_test_table[][8] = {
	{0x00000000, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555,
	 0x66666666, 0x77777777},
	{0x88888888, 0x99999999, 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd,
	 0xeeeeeeee, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
	 0xffffffff, 0xffffffff},
	{0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000,
	 0x00000000, 0x00000000},
	{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
	 0xffffffff, 0xffffffff}
};

static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr);
  771. int ddr3_tip_print_adll(void)
  772. {
  773. u32 bus_cnt = 0, if_id, data_p1, data_p2, ui_data3, dev_num = 0;
  774. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  775. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  776. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  777. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  778. for (bus_cnt = 0; bus_cnt < octets_per_if_num;
  779. bus_cnt++) {
  780. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
  781. CHECK_STATUS(ddr3_tip_bus_read
  782. (dev_num, if_id,
  783. ACCESS_TYPE_UNICAST, bus_cnt,
  784. DDR_PHY_DATA, 0x1, &data_p1));
  785. CHECK_STATUS(ddr3_tip_bus_read
  786. (dev_num, if_id, ACCESS_TYPE_UNICAST,
  787. bus_cnt, DDR_PHY_DATA, 0x2, &data_p2));
  788. CHECK_STATUS(ddr3_tip_bus_read
  789. (dev_num, if_id, ACCESS_TYPE_UNICAST,
  790. bus_cnt, DDR_PHY_DATA, 0x3, &ui_data3));
  791. DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
  792. (" IF %d bus_cnt %d phy_reg_1_data 0x%x phy_reg_2_data 0x%x phy_reg_3_data 0x%x\n",
  793. if_id, bus_cnt, data_p1, data_p2,
  794. ui_data3));
  795. }
  796. }
  797. return MV_OK;
  798. }
  799. /*
  800. * Set attribute value
  801. */
  802. int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value)
  803. {
  804. int ret;
  805. u32 *ptr_flag = NULL;
  806. ret = ddr3_tip_access_atr(dev_num, flag_id, value, &ptr_flag);
  807. if (ptr_flag != NULL) {
  808. printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x (was 0x%x)\n",
  809. flag_id, value, *ptr_flag);
  810. *ptr_flag = value;
  811. } else {
  812. printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x\n",
  813. flag_id, value);
  814. }
  815. return ret;
  816. }
  817. /*
  818. * Access attribute
  819. */
  820. static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr)
  821. {
  822. u32 tmp_val = 0, if_id = 0, pup_id = 0;
  823. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  824. *ptr = NULL;
  825. switch (flag_id) {
  826. case 0:
  827. *ptr = (u32 *)&(tm->if_act_mask);
  828. break;
  829. case 0x1:
  830. *ptr = (u32 *)&mask_tune_func;
  831. break;
  832. case 0x2:
  833. low_freq = (enum hws_ddr_freq)value;
  834. break;
  835. case 0x3:
  836. medium_freq = (enum hws_ddr_freq)value;
  837. break;
  838. case 0x4:
  839. *ptr = (u32 *)&generic_init_controller;
  840. break;
  841. case 0x8:
  842. *ptr = (u32 *)&start_xsb_offset;
  843. break;
  844. case 0x20:
  845. *ptr = (u32 *)&is_rl_old;
  846. break;
  847. case 0x21:
  848. *ptr = (u32 *)&is_freq_old;
  849. break;
  850. case 0x23:
  851. *ptr = (u32 *)&is_dfs_disabled;
  852. break;
  853. case 0x24:
  854. *ptr = (u32 *)&is_pll_before_init;
  855. break;
  856. case 0x25:
  857. *ptr = (u32 *)&is_adll_calib_before_init;
  858. break;
  859. case 0x28:
  860. *ptr = (u32 *)&is_tune_result;
  861. break;
  862. case 0x29:
  863. *ptr = (u32 *)&is_validate_window_per_if;
  864. break;
  865. case 0x2a:
  866. *ptr = (u32 *)&is_validate_window_per_pup;
  867. break;
  868. case 0x30:
  869. *ptr = (u32 *)&sweep_cnt;
  870. break;
  871. case 0x31:
  872. *ptr = (u32 *)&is_bist_reset_bit;
  873. break;
  874. case 0x32:
  875. *ptr = (u32 *)&is_dfs_in_init;
  876. break;
  877. case 0x33:
  878. *ptr = (u32 *)&g_zpodt_data;
  879. break;
  880. case 0x34:
  881. *ptr = (u32 *)&g_znodt_data;
  882. break;
  883. case 0x35:
  884. break;
  885. case 0x36:
  886. *ptr = (u32 *)&(freq_val[DDR_FREQ_LOW_FREQ]);
  887. break;
  888. case 0x37:
  889. *ptr = (u32 *)&start_pattern;
  890. break;
  891. case 0x38:
  892. *ptr = (u32 *)&end_pattern;
  893. break;
  894. case 0x39:
  895. *ptr = (u32 *)&phy_reg0_val;
  896. break;
  897. case 0x4a:
  898. *ptr = (u32 *)&phy_reg1_val;
  899. break;
  900. case 0x4b:
  901. *ptr = (u32 *)&phy_reg2_val;
  902. break;
  903. case 0x4c:
  904. *ptr = (u32 *)&phy_reg3_val;
  905. break;
  906. case 0x4e:
  907. sweep_pattern = (enum hws_pattern)value;
  908. break;
  909. case 0x51:
  910. *ptr = (u32 *)&g_znri_data;
  911. break;
  912. case 0x52:
  913. *ptr = (u32 *)&g_zpri_data;
  914. break;
  915. case 0x53:
  916. *ptr = (u32 *)&finger_test;
  917. break;
  918. case 0x54:
  919. *ptr = (u32 *)&n_finger_start;
  920. break;
  921. case 0x55:
  922. *ptr = (u32 *)&n_finger_end;
  923. break;
  924. case 0x56:
  925. *ptr = (u32 *)&p_finger_start;
  926. break;
  927. case 0x57:
  928. *ptr = (u32 *)&p_finger_end;
  929. break;
  930. case 0x58:
  931. *ptr = (u32 *)&p_finger_step;
  932. break;
  933. case 0x59:
  934. *ptr = (u32 *)&n_finger_step;
  935. break;
  936. case 0x5a:
  937. *ptr = (u32 *)&g_znri_ctrl;
  938. break;
  939. case 0x5b:
  940. *ptr = (u32 *)&g_zpri_ctrl;
  941. break;
  942. case 0x5c:
  943. *ptr = (u32 *)&is_reg_dump;
  944. break;
  945. case 0x5d:
  946. *ptr = (u32 *)&vref_init_val;
  947. break;
  948. case 0x5e:
  949. *ptr = (u32 *)&mode_2t;
  950. break;
  951. case 0x5f:
  952. *ptr = (u32 *)&xsb_validate_type;
  953. break;
  954. case 0x60:
  955. *ptr = (u32 *)&xsb_validation_base_address;
  956. break;
  957. case 0x67:
  958. *ptr = (u32 *)&activate_select_before_run_alg;
  959. break;
  960. case 0x68:
  961. *ptr = (u32 *)&activate_deselect_after_run_alg;
  962. break;
  963. case 0x69:
  964. *ptr = (u32 *)&odt_additional;
  965. break;
  966. case 0x70:
  967. *ptr = (u32 *)&debug_mode;
  968. break;
  969. case 0x71:
  970. pbs_pattern = (enum hws_pattern)value;
  971. break;
  972. case 0x72:
  973. *ptr = (u32 *)&delay_enable;
  974. break;
  975. case 0x73:
  976. *ptr = (u32 *)&ck_delay;
  977. break;
  978. case 0x75:
  979. *ptr = (u32 *)&ca_delay;
  980. break;
  981. case 0x100:
  982. *ptr = (u32 *)&debug_dunit;
  983. break;
  984. case 0x101:
  985. debug_acc = (int)value;
  986. break;
  987. case 0x102:
  988. debug_training = (u8)value;
  989. break;
  990. case 0x103:
  991. debug_training_bist = (u8)value;
  992. break;
  993. case 0x104:
  994. debug_centralization = (u8)value;
  995. break;
  996. case 0x105:
  997. debug_training_ip = (u8)value;
  998. break;
  999. case 0x106:
  1000. debug_leveling = (u8)value;
  1001. break;
  1002. case 0x107:
  1003. debug_pbs = (u8)value;
  1004. break;
  1005. case 0x108:
  1006. debug_training_static = (u8)value;
  1007. break;
  1008. case 0x109:
  1009. debug_training_access = (u8)value;
  1010. break;
  1011. case 0x112:
  1012. *ptr = &start_pattern;
  1013. break;
  1014. case 0x113:
  1015. *ptr = &end_pattern;
  1016. break;
  1017. default:
  1018. if ((flag_id >= 0x200) && (flag_id < 0x210)) {
  1019. if_id = flag_id - 0x200;
  1020. *ptr = (u32 *)&(tm->interface_params
  1021. [if_id].memory_freq);
  1022. } else if ((flag_id >= 0x210) && (flag_id < 0x220)) {
  1023. if_id = flag_id - 0x210;
  1024. *ptr = (u32 *)&(tm->interface_params
  1025. [if_id].speed_bin_index);
  1026. } else if ((flag_id >= 0x220) && (flag_id < 0x230)) {
  1027. if_id = flag_id - 0x220;
  1028. *ptr = (u32 *)&(tm->interface_params
  1029. [if_id].bus_width);
  1030. } else if ((flag_id >= 0x230) && (flag_id < 0x240)) {
  1031. if_id = flag_id - 0x230;
  1032. *ptr = (u32 *)&(tm->interface_params
  1033. [if_id].memory_size);
  1034. } else if ((flag_id >= 0x240) && (flag_id < 0x250)) {
  1035. if_id = flag_id - 0x240;
  1036. *ptr = (u32 *)&(tm->interface_params
  1037. [if_id].cas_l);
  1038. } else if ((flag_id >= 0x250) && (flag_id < 0x260)) {
  1039. if_id = flag_id - 0x250;
  1040. *ptr = (u32 *)&(tm->interface_params
  1041. [if_id].cas_wl);
  1042. } else if ((flag_id >= 0x270) && (flag_id < 0x2cf)) {
  1043. if_id = (flag_id - 0x270) / MAX_BUS_NUM;
  1044. pup_id = (flag_id - 0x270) % MAX_BUS_NUM;
  1045. *ptr = (u32 *)&(tm->interface_params[if_id].
  1046. as_bus_params[pup_id].is_ck_swap);
  1047. } else if ((flag_id >= 0x2d0) && (flag_id < 0x32f)) {
  1048. if_id = (flag_id - 0x2d0) / MAX_BUS_NUM;
  1049. pup_id = (flag_id - 0x2d0) % MAX_BUS_NUM;
  1050. *ptr = (u32 *)&(tm->interface_params[if_id].
  1051. as_bus_params[pup_id].is_dqs_swap);
  1052. } else if ((flag_id >= 0x330) && (flag_id < 0x38f)) {
  1053. if_id = (flag_id - 0x330) / MAX_BUS_NUM;
  1054. pup_id = (flag_id - 0x330) % MAX_BUS_NUM;
  1055. *ptr = (u32 *)&(tm->interface_params[if_id].
  1056. as_bus_params[pup_id].cs_bitmask);
  1057. } else if ((flag_id >= 0x390) && (flag_id < 0x3ef)) {
  1058. if_id = (flag_id - 0x390) / MAX_BUS_NUM;
  1059. pup_id = (flag_id - 0x390) % MAX_BUS_NUM;
  1060. *ptr = (u32 *)&(tm->interface_params
  1061. [if_id].as_bus_params
  1062. [pup_id].mirror_enable_bitmask);
  1063. } else if ((flag_id >= 0x500) && (flag_id <= 0x50f)) {
  1064. tmp_val = flag_id - 0x320;
  1065. *ptr = (u32 *)&(clamp_tbl[tmp_val]);
  1066. } else {
  1067. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1068. ("flag_id out of boundary %d\n",
  1069. flag_id));
  1070. return MV_BAD_PARAM;
  1071. }
  1072. }
  1073. return MV_OK;
  1074. }
  1075. #endif /* EXCLUDE_SWITCH_DEBUG */
  1076. #if defined(DDR_VIEWER_TOOL)
  1077. /*
  1078. * Print ADLL
  1079. */
  1080. int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
  1081. {
  1082. u32 i, j;
  1083. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  1084. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  1085. for (j = 0; j < octets_per_if_num; j++) {
  1086. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, j);
  1087. for (i = 0; i < MAX_INTERFACE_NUM; i++)
  1088. printf("%d ,", adll[i * octets_per_if_num + j]);
  1089. }
  1090. printf("\n");
  1091. return MV_OK;
  1092. }
  1093. int print_ph(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
  1094. {
  1095. u32 i, j;
  1096. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  1097. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  1098. for (j = 0; j < octets_per_if_num; j++) {
  1099. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, j);
  1100. for (i = 0; i < MAX_INTERFACE_NUM; i++)
  1101. printf("%d ,", adll[i * octets_per_if_num + j] >> 6);
  1102. }
  1103. printf("\n");
  1104. return MV_OK;
  1105. }
  1106. #endif /* DDR_VIEWER_TOOL */
  1107. #if !defined(EXCLUDE_SWITCH_DEBUG)
  1108. /* byte_index - only byte 0, 1, 2, or 3, oxff - test all bytes */
  1109. static u32 ddr3_tip_compare(u32 if_id, u32 *p_src, u32 *p_dst,
  1110. u32 byte_index)
  1111. {
  1112. u32 burst_cnt = 0, addr_offset, i_id;
  1113. int b_is_fail = 0;
  1114. addr_offset =
  1115. (byte_index ==
  1116. 0xff) ? (u32) 0xffffffff : (u32) (0xff << (byte_index * 8));
  1117. for (burst_cnt = 0; burst_cnt < EXT_ACCESS_BURST_LENGTH; burst_cnt++) {
  1118. if ((p_src[burst_cnt] & addr_offset) !=
  1119. (p_dst[if_id] & addr_offset))
  1120. b_is_fail = 1;
  1121. }
  1122. if (b_is_fail == 1) {
  1123. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1124. ("IF %d exp: ", if_id));
  1125. for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
  1126. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1127. ("0x%8x ", p_src[i_id]));
  1128. }
  1129. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1130. ("\n_i_f %d rcv: ", if_id));
  1131. for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
  1132. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1133. ("(0x%8x ", p_dst[i_id]));
  1134. }
  1135. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("\n "));
  1136. }
  1137. return b_is_fail;
  1138. }
  1139. #endif /* EXCLUDE_SWITCH_DEBUG */
  1140. #if defined(DDR_VIEWER_TOOL)
/*
 * Sweep validation
 *
 * Sweeps the Tx (direction == 0) or Rx centralization ADLL over its
 * full range on every active interface, runs a BIST at each step with
 * every killer pattern, and prints a per-step result table for each
 * chip select.  mode == 1 sweeps each PHY (pup) individually via
 * unicast writes; otherwise all pups are swept together via multicast.
 * The ADLL values in effect on entry are saved first and written back
 * after the sweep.
 */
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
			    u32 mode)
{
	u32 pup = 0, start_pup = 0, end_pup = 0;
	u32 adll = 0, rep = 0, pattern_idx = 0;
	u32 res[MAX_INTERFACE_NUM] = { 0 };
	int if_id = 0;
	u32 adll_value = 0;
	u32 reg;
	enum hws_access_type pup_access;
	u32 cs;
	u32 max_cs = ddr3_tip_max_cs_get(dev_num);
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* caller's repeat_num is deliberately overridden here */
	repeat_num = 2;

	if (mode == 1) {
		/* per pup */
		start_pup = 0;
		end_pup = octets_per_if_num - 1;
		pup_access = ACCESS_TYPE_UNICAST;
	} else {
		start_pup = 0;
		end_pup = 0;
		pup_access = ACCESS_TYPE_MULTICAST;
	}

	for (cs = 0; cs < max_cs; cs++) {
		reg = (direction == 0) ? CTX_PHY_REG(cs) : CRX_PHY_REG(cs);
		/* clear the per-step result accumulators */
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_IF_ACTIVE
					(tm->if_act_mask,
					 if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					ctrl_sweepres[adll][if_id][pup] =
						0;
				}
			}
		}

		for (adll = 0; adll < (MAX_INTERFACE_NUM * MAX_BUS_NUM); adll++)
			ctrl_adll[adll] = 0;

		/* Save DQS value(after algorithm run) */
		ddr3_tip_read_adll_value(dev_num, ctrl_adll,
					 reg, MASK_ALL_BITS);

		/*
		 * Sweep ADLL from 0:31 on all I/F on all Pup and perform
		 * BIST on each stage.
		 */
		for (pup = start_pup; pup <= end_pup; pup++) {
			for (adll = 0; adll < ADLL_LENGTH; adll++) {
				for (rep = 0; rep < repeat_num; rep++) {
					for (pattern_idx = PATTERN_KILLER_DQ0;
					     pattern_idx < PATTERN_LAST;
					     pattern_idx++) {
						/* Tx uses 2x granularity per step */
						adll_value =
							(direction == 0) ? (adll * 2) : adll;
						CHECK_STATUS(ddr3_tip_bus_write
							     (dev_num, ACCESS_TYPE_MULTICAST, 0,
							      pup_access, pup, DDR_PHY_DATA,
							      reg, adll_value));
						hws_ddr3_run_bist(dev_num, sweep_pattern, res,
								  cs);
						/* ddr3_tip_reset_fifo_ptr(dev_num); */
						for (if_id = 0;
						     if_id < MAX_INTERFACE_NUM;
						     if_id++) {
							VALIDATE_IF_ACTIVE
								(tm->if_act_mask,
								 if_id);
							ctrl_sweepres[adll][if_id][pup]
								+= res[if_id];
							if (mode == 1) {
								/*
								 * restore the saved ADLL so other pups
								 * keep their trained value.
								 * NOTE(review): "if_id * cs * octets"
								 * collapses to "pup" when cs == 0 -
								 * confirm the intended index layout.
								 */
								CHECK_STATUS
									(ddr3_tip_bus_write
									 (dev_num,
									  ACCESS_TYPE_UNICAST,
									  if_id,
									  ACCESS_TYPE_UNICAST,
									  pup,
									  DDR_PHY_DATA,
									  reg,
									  ctrl_adll[if_id *
										    cs *
										    octets_per_if_num
										    + pup]));
							}
						}
					}
				}
			}
		}
		/* emit the CSV-style header row */
		printf("Final, CS %d,%s, Sweep, Result, Adll,", cs,
		       ((direction == 0) ? "TX" : "RX"));
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
			if (mode == 1) {
				for (pup = start_pup; pup <= end_pup; pup++) {
					VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
					printf("I/F%d-PHY%d , ", if_id, pup);
				}
			} else {
				printf("I/F%d , ", if_id);
			}
		}
		printf("\n");

		/* one row of accumulated BIST results per ADLL step */
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			adll_value = (direction == 0) ? (adll * 2) : adll;
			printf("Final,%s, Sweep, Result, %d ,",
			       ((direction == 0) ? "TX" : "RX"), adll_value);
			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					printf("%8d , ",
					       ctrl_sweepres[adll][if_id]
					       [pup]);
				}
			}
			printf("\n");
		}

		/*
		 * Write back to the phy the Rx DQS value, we store in
		 * the beginning.
		 */
		ddr3_tip_write_adll_value(dev_num, ctrl_adll, reg);
		/* print adll results */
		ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, MASK_ALL_BITS);
		printf("%s, DQS, ADLL,,,", (direction == 0) ? "Tx" : "Rx");
		print_adll(dev_num, ctrl_adll);
	}
	ddr3_tip_reset_fifo_ptr(dev_num);

	return 0;
}
  1279. #if defined(EXCLUDE_SWITCH_DEBUG)
/*
 * Leveling sweep validation
 *
 * Sweeps the write-leveling (direction == 0) or read-leveling ADLL
 * around the trained value on every active interface, runs a BIST (or,
 * for pup 4, samples a result register at 0x1458) at each step, and
 * prints a result table per chip select.  mode == 1 sweeps each PHY
 * (pup) separately; otherwise all pups move together.  Trained
 * leveling/centralization values are saved on entry and written back
 * afterwards.
 */
int ddr3_tip_run_leveling_sweep_test(int dev_num, u32 repeat_num,
				     u32 direction, u32 mode)
{
	u32 pup = 0, start_pup = 0, end_pup = 0, start_adll = 0;
	u32 adll = 0, rep = 0, pattern_idx = 0;
	u32 read_data[MAX_INTERFACE_NUM];
	u32 res[MAX_INTERFACE_NUM] = { 0 };
	int if_id = 0, gap = 0;
	u32 adll_value = 0;
	u32 reg;
	enum hws_access_type pup_access;
	u32 cs;
	u32 max_cs = ddr3_tip_max_cs_get(dev_num);
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (mode == 1) { /* per pup */
		start_pup = 0;
		end_pup = octets_per_if_num - 1;
		pup_access = ACCESS_TYPE_UNICAST;
	} else {
		start_pup = 0;
		end_pup = 0;
		pup_access = ACCESS_TYPE_MULTICAST;
	}

	for (cs = 0; cs < max_cs; cs++) {
		reg = (direction == 0) ? WL_PHY_REG(cs) : RL_PHY_REG(cs);
		/* clear the per-step result accumulators */
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
				VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++)
					ctrl_sweepres[adll][if_id][pup] = 0;
			}
		}
		for (adll = 0; adll < MAX_INTERFACE_NUM * MAX_BUS_NUM; adll++) {
			ctrl_adll[adll] = 0;
			ctrl_level_phase[adll] = 0;
			ctrl_adll1[adll] = 0;
		}

		/* save leveling value after running algorithm */
		ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, 0x1f);
		read_phase_value(dev_num, ctrl_level_phase, reg, 0x7 << 6);
		if (direction == 0)
			ddr3_tip_read_adll_value(dev_num, ctrl_adll1,
						 CTX_PHY_REG(cs), MASK_ALL_BITS);

		/* Sweep ADLL from 0 to 31 on all interfaces, all pups,
		 * and perform BIST on each stage
		 */
		for (pup = start_pup; pup <= end_pup; pup++) {
			for (adll = 0; adll < ADLL_LENGTH; adll++) {
				for (rep = 0; rep < repeat_num; rep++) {
					/* Tx steps by 2, Rx by 3 */
					adll_value = (direction == 0) ? (adll * 2) : (adll * 3);
					for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
						/*
						 * trained start point = saved ADLL plus
						 * phase (bits [8:6]) converted to ADLL
						 * taps.
						 * NOTE(review): "if_id * cs * octets"
						 * collapses to "pup" when cs == 0 -
						 * confirm the intended index layout.
						 */
						start_adll = ctrl_adll[if_id * cs * octets_per_if_num + pup] +
							     (ctrl_level_phase[if_id * cs *
									       octets_per_if_num +
									       pup] >> 6) * 32;
						/* center the sweep window around the trained value */
						if (direction == 0)
							start_adll = (start_adll > 32) ? (start_adll - 32) : 0;
						else
							start_adll = (start_adll > 48) ? (start_adll - 48) : 0;
						adll_value += start_adll;
						gap = ctrl_adll1[if_id * cs * octets_per_if_num + pup] -
						      ctrl_adll[if_id * cs * octets_per_if_num + pup];
						gap = (((adll_value % 32) + gap) % 64);
						/* re-encode taps as {phase[8:6], adll[4:0]} */
						adll_value = ((adll_value % 32) +
							      (((adll_value - (adll_value % 32)) / 32) << 6));
						CHECK_STATUS(ddr3_tip_bus_write(dev_num,
										ACCESS_TYPE_UNICAST,
										if_id,
										pup_access,
										pup,
										DDR_PHY_DATA,
										reg,
										adll_value));
						if (direction == 0)
							CHECK_STATUS(ddr3_tip_bus_write(dev_num,
											ACCESS_TYPE_UNICAST,
											if_id,
											pup_access,
											pup,
											DDR_PHY_DATA,
											CTX_PHY_REG(cs),
											gap));
					}
					for (pattern_idx = PATTERN_KILLER_DQ0;
					     pattern_idx < PATTERN_LAST;
					     pattern_idx++) {
						hws_ddr3_run_bist(dev_num, sweep_pattern, res, cs);
						ddr3_tip_reset_fifo_ptr(dev_num);
						for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
							VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
							if (pup != 4) { /* TODO: remove literal */
								ctrl_sweepres[adll][if_id][pup] += res[if_id];
							} else {
								/* pup 4: sample and clear the HW result counters */
								CHECK_STATUS(ddr3_tip_if_read(dev_num,
											      ACCESS_TYPE_UNICAST,
											      if_id,
											      0x1458,
											      read_data,
											      MASK_ALL_BITS));
								ctrl_sweepres[adll][if_id][pup] += read_data[if_id];
								CHECK_STATUS(ddr3_tip_if_write(dev_num,
											       ACCESS_TYPE_UNICAST,
											       if_id,
											       0x1458,
											       0x0,
											       0xFFFFFFFF));
								CHECK_STATUS(ddr3_tip_if_write(dev_num,
											       ACCESS_TYPE_UNICAST,
											       if_id,
											       0x145C,
											       0x0,
											       0xFFFFFFFF));
							}
						}
					}
				}
			}

			/* restore the trained value for this pup before sweeping the next */
			for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
				start_adll = ctrl_adll[if_id * cs * octets_per_if_num + pup] +
					     ctrl_level_phase[if_id * cs * octets_per_if_num + pup];
				CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST, if_id, pup_access, pup,
								DDR_PHY_DATA, reg, start_adll));
				if (direction == 0)
					CHECK_STATUS(ddr3_tip_bus_write(dev_num,
									ACCESS_TYPE_UNICAST,
									if_id,
									pup_access,
									pup,
									DDR_PHY_DATA,
									CTX_PHY_REG(cs),
									ctrl_adll1[if_id *
										   cs *
										   octets_per_if_num +
										   pup]));
			}
		}

		/* emit the CSV-style header row */
		printf("Final,CS %d,%s,Leveling,Result,Adll,", cs, ((direction == 0) ? "TX" : "RX"));
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
			if (mode == 1) {
				for (pup = start_pup; pup <= end_pup; pup++) {
					VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
					printf("I/F%d-PHY%d , ", if_id, pup);
				}
			} else {
				printf("I/F%d , ", if_id);
			}
		}
		printf("\n");

		/* one row of accumulated results per ADLL step */
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			adll_value = (direction == 0) ? ((adll * 2) - 32) : ((adll * 3) - 48);
			printf("Final,%s,LevelingSweep,Result, %d ,", ((direction == 0) ? "TX" : "RX"), adll_value);
			for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
				VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++)
					printf("%8d , ", ctrl_sweepres[adll][if_id][pup]);
			}
			printf("\n");
		}

		/* write back to the phy the Rx DQS value, we store in the beginning */
		write_leveling_value(dev_num, ctrl_adll, ctrl_level_phase, reg);
		if (direction == 0)
			ddr3_tip_write_adll_value(dev_num, ctrl_adll1, CTX_PHY_REG(cs));

		/* print adll results */
		ddr3_tip_read_adll_value(dev_num, ctrl_adll, reg, MASK_ALL_BITS);
		printf("%s,DQS,Leveling,,,", (direction == 0) ? "Tx" : "Rx");
		print_adll(dev_num, ctrl_adll);
		print_ph(dev_num, ctrl_level_phase);
	}
	ddr3_tip_reset_fifo_ptr(dev_num);

	return 0;
}
  1453. #endif /* EXCLUDE_SWITCH_DEBUG */
  1454. void print_topology(struct mv_ddr_topology_map *topology_db)
  1455. {
  1456. u32 ui, uj;
  1457. u32 dev_num = 0;
  1458. printf("\tinterface_mask: 0x%x\n", topology_db->if_act_mask);
  1459. printf("\tNumber of buses: 0x%x\n",
  1460. ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE));
  1461. printf("\tbus_act_mask: 0x%x\n", topology_db->bus_act_mask);
  1462. for (ui = 0; ui < MAX_INTERFACE_NUM; ui++) {
  1463. VALIDATE_IF_ACTIVE(topology_db->if_act_mask, ui);
  1464. printf("\n\tInterface ID: %d\n", ui);
  1465. printf("\t\tDDR Frequency: %s\n",
  1466. convert_freq(topology_db->
  1467. interface_params[ui].memory_freq));
  1468. printf("\t\tSpeed_bin: %d\n",
  1469. topology_db->interface_params[ui].speed_bin_index);
  1470. printf("\t\tBus_width: %d\n",
  1471. (4 << topology_db->interface_params[ui].bus_width));
  1472. printf("\t\tMem_size: %s\n",
  1473. convert_mem_size(topology_db->
  1474. interface_params[ui].memory_size));
  1475. printf("\t\tCAS-WL: %d\n",
  1476. topology_db->interface_params[ui].cas_wl);
  1477. printf("\t\tCAS-L: %d\n",
  1478. topology_db->interface_params[ui].cas_l);
  1479. printf("\t\tTemperature: %d\n",
  1480. topology_db->interface_params[ui].interface_temp);
  1481. printf("\n");
  1482. for (uj = 0; uj < 4; uj++) {
  1483. printf("\t\tBus %d parameters- CS Mask: 0x%x\t", uj,
  1484. topology_db->interface_params[ui].
  1485. as_bus_params[uj].cs_bitmask);
  1486. printf("Mirror: 0x%x\t",
  1487. topology_db->interface_params[ui].
  1488. as_bus_params[uj].mirror_enable_bitmask);
  1489. printf("DQS Swap is %s \t",
  1490. (topology_db->
  1491. interface_params[ui].as_bus_params[uj].
  1492. is_dqs_swap == 1) ? "enabled" : "disabled");
  1493. printf("Ck Swap:%s\t",
  1494. (topology_db->
  1495. interface_params[ui].as_bus_params[uj].
  1496. is_ck_swap == 1) ? "enabled" : "disabled");
  1497. printf("\n");
  1498. }
  1499. }
  1500. }
  1501. #endif /* DDR_VIEWER_TOOL */
  1502. #if !defined(EXCLUDE_SWITCH_DEBUG)
  1503. /*
  1504. * Execute XSB Test transaction (rd/wr/both)
  1505. */
  1506. int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
  1507. u32 read_type, u32 burst_length)
  1508. {
  1509. u32 seq = 0, if_id = 0, addr, cnt;
  1510. int ret = MV_OK, ret_tmp;
  1511. u32 data_read[MAX_INTERFACE_NUM];
  1512. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  1513. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  1514. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  1515. addr = mem_addr;
  1516. for (cnt = 0; cnt <= burst_length; cnt++) {
  1517. seq = (seq + 1) % 8;
  1518. if (write_type != 0) {
  1519. CHECK_STATUS(ddr3_tip_ext_write
  1520. (dev_num, if_id, addr, 1,
  1521. xsb_test_table[seq]));
  1522. }
  1523. if (read_type != 0) {
  1524. CHECK_STATUS(ddr3_tip_ext_read
  1525. (dev_num, if_id, addr, 1,
  1526. data_read));
  1527. }
  1528. if ((read_type != 0) && (write_type != 0)) {
  1529. ret_tmp =
  1530. ddr3_tip_compare(if_id,
  1531. xsb_test_table[seq],
  1532. data_read,
  1533. 0xff);
  1534. addr += (EXT_ACCESS_BURST_LENGTH * 4);
  1535. ret = (ret != MV_OK) ? ret : ret_tmp;
  1536. }
  1537. }
  1538. }
  1539. return ret;
  1540. }
  1541. #else /*EXCLUDE_SWITCH_DEBUG */
/* Definition kept so other code linking against it still builds */
u32 start_xsb_offset = 0;

/* Stub: XSB test support is compiled out in this configuration */
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
		 u32 read_type, u32 burst_length)
{
	return MV_OK;
}
  1548. #endif /* EXCLUDE_SWITCH_DEBUG */