ddr3_debug.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

u8 is_reg_dump = 0;
u8 debug_pbs = DEBUG_LEVEL_ERROR;

/*
 * API to change flags outside of the lib
 */
#ifndef SILENT_LIB
/* Debug flags for other Training modules */
u8 debug_training_static = DEBUG_LEVEL_ERROR;
u8 debug_training = DEBUG_LEVEL_ERROR;
u8 debug_leveling = DEBUG_LEVEL_ERROR;
u8 debug_centralization = DEBUG_LEVEL_ERROR;
u8 debug_training_ip = DEBUG_LEVEL_ERROR;
u8 debug_training_bist = DEBUG_LEVEL_ERROR;
u8 debug_training_hw_alg = DEBUG_LEVEL_ERROR;
u8 debug_training_access = DEBUG_LEVEL_ERROR;
u8 debug_training_a38x = DEBUG_LEVEL_ERROR;
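
/* Set the debug log level of a single training block, or of all blocks at once */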
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
{
	switch (block) {
	case DEBUG_BLOCK_STATIC:
		debug_training_static = level;
		break;
	case DEBUG_BLOCK_TRAINING_MAIN:
		debug_training = level;
		break;
	case DEBUG_BLOCK_LEVELING:
		debug_leveling = level;
		break;
	case DEBUG_BLOCK_CENTRALIZATION:
		debug_centralization = level;
		break;
	case DEBUG_BLOCK_PBS:
		debug_pbs = level;
		break;
	case DEBUG_BLOCK_ALG:
		debug_training_hw_alg = level;
		break;
	case DEBUG_BLOCK_DEVICE:
		debug_training_a38x = level;
		break;
	case DEBUG_BLOCK_ACCESS:
		debug_training_access = level;
		break;
	case DEBUG_STAGES_REG_DUMP:
		if (level == DEBUG_LEVEL_TRACE)
			is_reg_dump = 1;
		else
			is_reg_dump = 0;
		break;
	case DEBUG_BLOCK_ALL:
	default:
		debug_training_static = level;
		debug_training = level;
		debug_leveling = level;
		debug_centralization = level;
		debug_pbs = level;
		debug_training_hw_alg = level;
		debug_training_access = level;
		debug_training_a38x = level;
	}
}
#else
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
{
	return;
}
#endif
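
/* Per-device config callbacks plus debug/validation state used by the functions below */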
struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
u8 is_default_centralization = 0;
u8 is_tune_result = 0;
u8 is_validate_window_per_if = 0;
u8 is_validate_window_per_pup = 0;
u8 sweep_cnt = 1;
u32 is_bist_reset_bit = 1;
static struct hws_xsb_info xsb_info[HWS_MAX_DEVICE_NUM];

/*
 * Dump Dunit & Phy registers
 */
int ddr3_tip_reg_dump(u32 dev_num)
{
	u32 if_id, reg_addr, data_value, bus_id;
	u32 read_data[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	printf("-- dunit registers --\n");
	for (reg_addr = 0x1400; reg_addr < 0x19f0; reg_addr += 4) {
		printf("0x%x ", reg_addr);
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, reg_addr, read_data,
				      MASK_ALL_BITS));
			printf("0x%x ", read_data[if_id]);
		}
		printf("\n");
	}

	printf("-- Phy registers --\n");
	for (reg_addr = 0; reg_addr <= 0xff; reg_addr++) {
		printf("0x%x ", reg_addr);
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			for (bus_id = 0;
			     bus_id < tm->num_of_bus_per_interface;
			     bus_id++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, bus_id,
					      DDR_PHY_DATA, reg_addr,
					      &data_value));
				printf("0x%x ", data_value);
			}
			for (bus_id = 0;
			     bus_id < tm->num_of_bus_per_interface;
			     bus_id++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, bus_id,
					      DDR_PHY_CONTROL, reg_addr,
					      &data_value));
				printf("0x%x ", data_value);
			}
		}
		printf("\n");
	}

	return MV_OK;
}

/*
 * Register access func registration
 */
int ddr3_tip_init_config_func(u32 dev_num,
			      struct hws_tip_config_func_db *config_func)
{
	if (config_func == NULL)
		return MV_BAD_PARAM;

	memcpy(&config_func_info[dev_num], config_func,
	       sizeof(struct hws_tip_config_func_db));

	return MV_OK;
}

/*
 * Get training result info pointer
 */
enum hws_result *ddr3_tip_get_result_ptr(u32 stage)
{
	return training_result[stage];
}

/*
 * Device info read
 */
int ddr3_tip_get_device_info(u32 dev_num, struct ddr3_device_info *info_ptr)
{
	if (config_func_info[dev_num].tip_get_device_info_func != NULL) {
		return config_func_info[dev_num].
			tip_get_device_info_func((u8)dev_num, info_ptr);
	}

	return MV_FAIL;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Convert freq to character string
 */
static char *convert_freq(enum hws_ddr_freq freq)
{
	switch (freq) {
	case DDR_FREQ_LOW_FREQ:
		return "DDR_FREQ_LOW_FREQ";
	case DDR_FREQ_400:
		return "400";
	case DDR_FREQ_533:
		return "533";
	case DDR_FREQ_667:
		return "667";
	case DDR_FREQ_800:
		return "800";
	case DDR_FREQ_933:
		return "933";
	case DDR_FREQ_1066:
		return "1066";
	case DDR_FREQ_311:
		return "311";
	case DDR_FREQ_333:
		return "333";
	case DDR_FREQ_467:
		return "467";
	case DDR_FREQ_850:
		return "850";
	case DDR_FREQ_900:
		return "900";
	case DDR_FREQ_360:
		return "DDR_FREQ_360";
	case DDR_FREQ_1000:
		return "DDR_FREQ_1000";
	default:
		return "Unknown Frequency";
	}
}

/*
 * Convert device ID to character string
 */
static char *convert_dev_id(u32 dev_id)
{
	switch (dev_id) {
	case 0x6800:
		return "A38xx";
	case 0x6900:
		return "A39XX";
	case 0xf400:
		return "AC3";
	case 0xfc00:
		return "BC2";
	default:
		return "Unknown Device";
	}
}
/*
 * Convert memory size to character string
 */
static char *convert_mem_size(u32 dev_id)
{
	switch (dev_id) {
	case 0:
		return "512 MB";
	case 1:
		return "1 GB";
	case 2:
		return "2 GB";
	case 3:
		return "4 GB";
	case 4:
		return "8 GB";
	default:
		return "wrong mem size";
	}
}

int print_device_info(u8 dev_num)
{
	struct ddr3_device_info info_ptr;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	CHECK_STATUS(ddr3_tip_get_device_info(dev_num, &info_ptr));
	printf("=== DDR setup START===\n");
	printf("\tDevice ID: %s\n", convert_dev_id(info_ptr.device_id));
	printf("\tDDR3 CK delay: %d\n", info_ptr.ck_delay);
	print_topology(tm);
	printf("=== DDR setup END===\n");

	return MV_OK;
}

void hws_ddr3_tip_sweep_test(int enable)
{
	if (enable) {
		is_validate_window_per_if = 1;
		is_validate_window_per_pup = 1;
		debug_training = DEBUG_LEVEL_TRACE;
	} else {
		is_validate_window_per_if = 0;
		is_validate_window_per_pup = 0;
	}
}
#endif

char *ddr3_tip_convert_tune_result(enum hws_result tune_result)
{
	switch (tune_result) {
	case TEST_FAILED:
		return "FAILED";
	case TEST_SUCCESS:
		return "PASS";
	case NO_TEST_DONE:
		return "NOT COMPLETED";
	default:
		return "Un-KNOWN";
	}
}

/*
 * Print log info
 */
int ddr3_tip_print_log(u32 dev_num, u32 mem_addr)
{
	u32 if_id = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifndef EXCLUDE_SWITCH_DEBUG
	if ((is_validate_window_per_if != 0) ||
	    (is_validate_window_per_pup != 0)) {
		u32 is_pup_log = 0;
		enum hws_ddr_freq freq;

		freq = tm->interface_params[first_active_if].memory_freq;
		is_pup_log = (is_validate_window_per_pup != 0) ? 1 : 0;
		printf("===VALIDATE WINDOW LOG START===\n");
		printf("DDR Frequency: %s ======\n", convert_freq(freq));
		/* print sweep windows */
		ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 1, is_pup_log);
		ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 0, is_pup_log);
		ddr3_tip_print_all_pbs_result(dev_num);
		ddr3_tip_print_wl_supp_result(dev_num);
		printf("===VALIDATE WINDOW LOG END ===\n");
		CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));
		ddr3_tip_reg_dump(dev_num);
	}
#endif

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("IF %d Status:\n", if_id));

		if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tInit Controller: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[INIT_CONTROLLER]
					    [if_id])));
		}
		if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLow freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_LOW_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLoad Pattern: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[LOAD_PATTERN]
					    [if_id])));
		}
		if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tMedium freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_MEDIUM_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING]
					    [if_id])));
		}
		if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLoad Pattern: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[LOAD_PATTERN_2]
					    [if_id])));
		}
		if (mask_tune_func & READ_LEVELING_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tRL: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[READ_LEVELING]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL Supp: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING_SUPP]
					    [if_id])));
		}
		if (mask_tune_func & PBS_RX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tPBS RX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[PBS_RX]
					    [if_id])));
		}
		if (mask_tune_func & PBS_TX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tPBS TX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[PBS_TX]
					    [if_id])));
		}
		if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tTarget freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_TARGET_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL TF: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING_TF]
					    [if_id])));
		}
		if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tRL TF: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[READ_LEVELING_TF]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL TF Supp: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result
					    [WRITE_LEVELING_SUPP_TF]
					    [if_id])));
		}
		if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tCentr RX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[CENTRALIZATION_RX]
					    [if_id])));
		}
		if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tVREF_CALIBRATION: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[VREF_CALIBRATION]
					    [if_id])));
		}
		if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tCentr TX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[CENTRALIZATION_TX]
					    [if_id])));
		}
	}

	return MV_OK;
}

/*
 * Print stability log info
 */
int ddr3_tip_print_stability_log(u32 dev_num)
{
	u8 if_id = 0, csindex = 0, bus_id = 0, idx = 0;
	u32 reg_data;
	u32 read_data[MAX_INTERFACE_NUM];
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* Title print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		printf("Title: I/F# , Tj, Calibration_n0, Calibration_p0, Calibration_n1, Calibration_p1, Calibration_n2, Calibration_p2,");
		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			printf("\n");
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			printf("VWTx, VWRx, WL_tot, WL_ADLL, WL_PH, RL_Tot, RL_ADLL, RL_PH, RL_Smp, Cen_tx, Cen_rx, Vref, DQVref,");
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSTx-Pad%d,", idx);
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSRx-Pad%d,", idx);
		}
	}
	printf("\n");

	/* Data print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		printf("Data: %d,%d,", if_id,
		       (config_func_info[dev_num].tip_get_temperature != NULL)
		       ? (config_func_info[dev_num].
			  tip_get_temperature(dev_num)) : (0));

		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1dc8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0000) >> 16),
		       ((read_data[if_id] & 0xfc00000) >> 22));

		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			for (bus_id = 0; bus_id < MAX_BUS_NUM; bus_id++) {
				printf("\n");
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  RESULT_DB_PHY_REG_ADDR +
						  csindex, &reg_data);
				printf("%d,%d,", (reg_data & 0x1f),
				       ((reg_data & 0x3e0) >> 5));
				/* WL */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  WL_PHY_REG +
						  csindex * 4, &reg_data);
				printf("%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32,
				       (reg_data & 0x1f),
				       (reg_data & 0x1c0) >> 6);
				/* RL */
				CHECK_STATUS(ddr3_tip_if_read
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id,
					      READ_DATA_SAMPLE_DELAY,
					      read_data, MASK_ALL_BITS));
				read_data[if_id] =
					(read_data[if_id] &
					 (0xf << (4 * csindex))) >>
					(4 * csindex);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  RL_PHY_REG + csindex * 4,
						  &reg_data);
				printf("%d,%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32 +
				       read_data[if_id] * 64,
				       (reg_data & 0x1f),
				       ((reg_data & 0x1c0) >> 6),
				       read_data[if_id]);
				/* Centralization */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  WRITE_CENTRALIZATION_PHY_REG
						  + csindex * 4, &reg_data);
				printf("%d,", (reg_data & 0x3f));
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  READ_CENTRALIZATION_PHY_REG
						  + csindex * 4, &reg_data);
				printf("%d,", (reg_data & 0x1f));
				/* Vref */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  PAD_CONFIG_PHY_REG,
						  &reg_data);
				printf("%d,", (reg_data & 0x7));
				/* DQVref */
				/* Need to add the Read Function from device */
				printf("%d,", 0);
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0xd0 +
							  12 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x10 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x50 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
			}
		}
	}
	printf("\n");

	return MV_OK;
}

/*
 * Register XSB information
 */
int ddr3_tip_register_xsb_info(u32 dev_num, struct hws_xsb_info *xsb_info_table)
{
	memcpy(&xsb_info[dev_num], xsb_info_table, sizeof(struct hws_xsb_info));
	return MV_OK;
}

/*
 * Read ADLL Value
 */
int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
		    int reg_addr, u32 mask)
{
	u32 data_value;
	u32 if_id = 0, bus_id = 0;
	u32 dev_num = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/*
	 * multi CS support - reg_addr is calculated in the calling function
	 * with the CS offset
	 */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
		     bus_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
						       ACCESS_TYPE_UNICAST,
						       bus_id,
						       DDR_PHY_DATA, reg_addr,
						       &data_value));
			pup_values[if_id *
				   tm->num_of_bus_per_interface + bus_id] =
				data_value & mask;
		}
	}

	return 0;
}

/*
 * Write ADLL Value
 */
int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
		     int reg_addr)
{
	u32 if_id = 0, bus_id = 0;
	u32 dev_num = 0, data;
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/*
	 * multi CS support - reg_addr is calculated in the calling function
	 * with the CS offset
	 */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
		     bus_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			data = pup_values[if_id *
					  tm->num_of_bus_per_interface +
					  bus_id];
			CHECK_STATUS(ddr3_tip_bus_write(dev_num,
							ACCESS_TYPE_UNICAST,
							if_id,
							ACCESS_TYPE_UNICAST,
							bus_id, DDR_PHY_DATA,
							reg_addr, data));
		}
	}

	return 0;
}

#ifndef EXCLUDE_SWITCH_DEBUG
u32 rl_version = 1; /* 0 - old RL machine */
struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
u32 start_xsb_offset = 0;
u8 is_rl_old = 0;
u8 is_freq_old = 0;
u8 is_dfs_disabled = 0;
u32 default_centrlization_value = 0x12;
u32 vref = 0x4;
u32 activate_select_before_run_alg = 1, activate_deselect_after_run_alg = 1,
	rl_test = 0, reset_read_fifo = 0;
int debug_acc = 0;
u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
u8 cs_mask_reg[] = {
	0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
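
/* Data patterns used by run_xsb_test() for the external write/read/compare test */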
u32 xsb_test_table[][8] = {
	{0x00000000, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555,
	 0x66666666, 0x77777777},
	{0x88888888, 0x99999999, 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd,
	 0xeeeeeeee, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
	 0xffffffff, 0xffffffff},
	{0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000,
	 0x00000000, 0x00000000},
	{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
	 0xffffffff, 0xffffffff}
};

static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr);

int ddr3_tip_print_adll(void)
{
	u32 bus_cnt = 0, if_id, data_p1, data_p2, ui_data3, dev_num = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_cnt++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id,
				      ACCESS_TYPE_UNICAST, bus_cnt,
				      DDR_PHY_DATA, 0x1, &data_p1));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, 0x2, &data_p2));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, 0x3, &ui_data3));
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  (" IF %d bus_cnt %d phy_reg_1_data 0x%x phy_reg_2_data 0x%x phy_reg_3_data 0x%x\n",
					   if_id, bus_cnt, data_p1, data_p2,
					   ui_data3));
		}
	}

	return MV_OK;
}

/*
 * Set attribute value
 */
int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value)
{
	int ret;
	u32 *ptr_flag = NULL;

	ret = ddr3_tip_access_atr(dev_num, flag_id, value, &ptr_flag);
	if (ptr_flag != NULL) {
		printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x (was 0x%x)\n",
		       flag_id, value, *ptr_flag);
		*ptr_flag = value;
	} else {
		printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x\n",
		       flag_id, value);
	}

	return ret;
}

/*
 * Access attribute
 */
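/*
 * Map a debug flag ID to the address of the corresponding global or topology
 * field; flag IDs 0x101-0x109 apply the value directly instead of returning
 * a pointer.
 */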
static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr)
{
	u32 tmp_val = 0, if_id = 0, pup_id = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	*ptr = NULL;

	switch (flag_id) {
	case 0:
		*ptr = (u32 *)&(tm->if_act_mask);
		break;
	case 0x1:
		*ptr = (u32 *)&mask_tune_func;
		break;
	case 0x2:
		*ptr = (u32 *)&low_freq;
		break;
	case 0x3:
		*ptr = (u32 *)&medium_freq;
		break;
	case 0x4:
		*ptr = (u32 *)&generic_init_controller;
		break;
	case 0x5:
		*ptr = (u32 *)&rl_version;
		break;
	case 0x8:
		*ptr = (u32 *)&start_xsb_offset;
		break;
	case 0x20:
		*ptr = (u32 *)&is_rl_old;
		break;
	case 0x21:
		*ptr = (u32 *)&is_freq_old;
		break;
	case 0x23:
		*ptr = (u32 *)&is_dfs_disabled;
		break;
	case 0x24:
		*ptr = (u32 *)&is_pll_before_init;
		break;
	case 0x25:
		*ptr = (u32 *)&is_adll_calib_before_init;
		break;
#ifdef STATIC_ALGO_SUPPORT
	case 0x26:
		*ptr = (u32 *)&(silicon_delay[0]);
		break;
	case 0x27:
		*ptr = (u32 *)&wl_debug_delay;
		break;
#endif
	case 0x28:
		*ptr = (u32 *)&is_tune_result;
		break;
	case 0x29:
		*ptr = (u32 *)&is_validate_window_per_if;
		break;
	case 0x2a:
		*ptr = (u32 *)&is_validate_window_per_pup;
		break;
	case 0x30:
		*ptr = (u32 *)&sweep_cnt;
		break;
	case 0x31:
		*ptr = (u32 *)&is_bist_reset_bit;
		break;
	case 0x32:
		*ptr = (u32 *)&is_dfs_in_init;
		break;
	case 0x33:
		*ptr = (u32 *)&p_finger;
		break;
	case 0x34:
		*ptr = (u32 *)&n_finger;
		break;
	case 0x35:
		*ptr = (u32 *)&init_freq;
		break;
	case 0x36:
		*ptr = (u32 *)&(freq_val[DDR_FREQ_LOW_FREQ]);
		break;
	case 0x37:
		*ptr = (u32 *)&start_pattern;
		break;
	case 0x38:
		*ptr = (u32 *)&end_pattern;
		break;
	case 0x39:
		*ptr = (u32 *)&phy_reg0_val;
		break;
	case 0x4a:
		*ptr = (u32 *)&phy_reg1_val;
		break;
	case 0x4b:
		*ptr = (u32 *)&phy_reg2_val;
		break;
	case 0x4c:
		*ptr = (u32 *)&phy_reg3_val;
		break;
	case 0x4e:
		*ptr = (u32 *)&sweep_pattern;
		break;
	case 0x50:
		*ptr = (u32 *)&is_rzq6;
		break;
	case 0x51:
		*ptr = (u32 *)&znri_data_phy_val;
		break;
	case 0x52:
		*ptr = (u32 *)&zpri_data_phy_val;
		break;
	case 0x53:
		*ptr = (u32 *)&finger_test;
		break;
	case 0x54:
		*ptr = (u32 *)&n_finger_start;
		break;
	case 0x55:
		*ptr = (u32 *)&n_finger_end;
		break;
	case 0x56:
		*ptr = (u32 *)&p_finger_start;
		break;
	case 0x57:
		*ptr = (u32 *)&p_finger_end;
		break;
	case 0x58:
		*ptr = (u32 *)&p_finger_step;
		break;
	case 0x59:
		*ptr = (u32 *)&n_finger_step;
		break;
	case 0x5a:
		*ptr = (u32 *)&znri_ctrl_phy_val;
		break;
	case 0x5b:
		*ptr = (u32 *)&zpri_ctrl_phy_val;
		break;
	case 0x5c:
		*ptr = (u32 *)&is_reg_dump;
		break;
	case 0x5d:
		*ptr = (u32 *)&vref;
		break;
	case 0x5e:
		*ptr = (u32 *)&mode2_t;
		break;
	case 0x5f:
		*ptr = (u32 *)&xsb_validate_type;
		break;
	case 0x60:
		*ptr = (u32 *)&xsb_validation_base_address;
		break;
	case 0x67:
		*ptr = (u32 *)&activate_select_before_run_alg;
		break;
	case 0x68:
		*ptr = (u32 *)&activate_deselect_after_run_alg;
		break;
	case 0x69:
		*ptr = (u32 *)&odt_additional;
		break;
	case 0x70:
		*ptr = (u32 *)&debug_mode;
		break;
	case 0x71:
		*ptr = (u32 *)&pbs_pattern;
		break;
	case 0x72:
		*ptr = (u32 *)&delay_enable;
		break;
	case 0x73:
		*ptr = (u32 *)&ck_delay;
		break;
	case 0x74:
		*ptr = (u32 *)&ck_delay_16;
		break;
	case 0x75:
		*ptr = (u32 *)&ca_delay;
		break;
	case 0x100:
		*ptr = (u32 *)&debug_dunit;
		break;
	case 0x101:
		debug_acc = (int)value;
		break;
	case 0x102:
		debug_training = (u8)value;
		break;
	case 0x103:
		debug_training_bist = (u8)value;
		break;
	case 0x104:
		debug_centralization = (u8)value;
		break;
	case 0x105:
		debug_training_ip = (u8)value;
		break;
	case 0x106:
		debug_leveling = (u8)value;
		break;
	case 0x107:
		debug_pbs = (u8)value;
		break;
	case 0x108:
		debug_training_static = (u8)value;
		break;
	case 0x109:
		debug_training_access = (u8)value;
		break;
	case 0x112:
		*ptr = &start_pattern;
		break;
	case 0x113:
		*ptr = &end_pattern;
		break;
	default:
		if ((flag_id >= 0x200) && (flag_id < 0x210)) {
			if_id = flag_id - 0x200;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].memory_freq);
		} else if ((flag_id >= 0x210) && (flag_id < 0x220)) {
			if_id = flag_id - 0x210;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].speed_bin_index);
		} else if ((flag_id >= 0x220) && (flag_id < 0x230)) {
			if_id = flag_id - 0x220;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].bus_width);
		} else if ((flag_id >= 0x230) && (flag_id < 0x240)) {
			if_id = flag_id - 0x230;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].memory_size);
		} else if ((flag_id >= 0x240) && (flag_id < 0x250)) {
			if_id = flag_id - 0x240;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].cas_l);
		} else if ((flag_id >= 0x250) && (flag_id < 0x260)) {
			if_id = flag_id - 0x250;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].cas_wl);
		} else if ((flag_id >= 0x270) && (flag_id < 0x2cf)) {
			if_id = (flag_id - 0x270) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x270) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].is_ck_swap);
		} else if ((flag_id >= 0x2d0) && (flag_id < 0x32f)) {
			if_id = (flag_id - 0x2d0) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x2d0) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].is_dqs_swap);
		} else if ((flag_id >= 0x330) && (flag_id < 0x38f)) {
			if_id = (flag_id - 0x330) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x330) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].cs_bitmask);
		} else if ((flag_id >= 0x390) && (flag_id < 0x3ef)) {
			if_id = (flag_id - 0x390) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x390) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].as_bus_params
					[pup_id].mirror_enable_bitmask);
		} else if ((flag_id >= 0x500) && (flag_id <= 0x50f)) {
			tmp_val = flag_id - 0x500;	/* index relative to the 0x500 base */
			*ptr = (u32 *)&(clamp_tbl[tmp_val]);
		} else {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("flag_id out of boundary %d\n",
					   flag_id));
			return MV_BAD_PARAM;
		}
	}

	return MV_OK;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Print ADLL
 */
int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
{
	u32 i, j;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (j = 0; j < tm->num_of_bus_per_interface; j++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, j);
		for (i = 0; i < MAX_INTERFACE_NUM; i++) {
			printf("%d ,",
			       adll[i * tm->num_of_bus_per_interface + j]);
		}
	}
	printf("\n");

	return MV_OK;
}
#endif
/* byte_index - only byte 0, 1, 2, or 3; 0xff - test all bytes */
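/* Returns 1 if any word in the burst differs in the selected byte lane(s), 0 otherwise */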
static u32 ddr3_tip_compare(u32 if_id, u32 *p_src, u32 *p_dst,
			    u32 byte_index)
{
	u32 burst_cnt = 0, addr_offset, i_id;
	int b_is_fail = 0;

	addr_offset =
		(byte_index ==
		 0xff) ? (u32)0xffffffff : (u32)(0xff << (byte_index * 8));
	for (burst_cnt = 0; burst_cnt < EXT_ACCESS_BURST_LENGTH; burst_cnt++) {
		if ((p_src[burst_cnt] & addr_offset) !=
		    (p_dst[burst_cnt] & addr_offset))
			b_is_fail = 1;
	}

	if (b_is_fail == 1) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("IF %d exp: ", if_id));
		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("0x%8x ", p_src[i_id]));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("\n_i_f %d rcv: ", if_id));
		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("(0x%8x ", p_dst[i_id]));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("\n "));
	}

	return b_is_fail;
}

/* test_type = 0-tx , 1-rx */
int ddr3_tip_sweep_test(u32 dev_num, u32 test_type,
			u32 mem_addr, u32 is_modify_adll,
			u32 start_if, u32 end_if, u32 startpup, u32 endpup)
{
	u32 bus_cnt = 0, adll_val = 0, if_id, ui_prev_adll, ui_mask_bit,
		end_adll, start_adll;
	u32 reg_addr = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (test_type == 0) {
		reg_addr = 1;
		ui_mask_bit = 0x3f;
		start_adll = 0;
		end_adll = ui_mask_bit;
	} else {
		reg_addr = 3;
		ui_mask_bit = 0x1f;
		start_adll = 0;
		end_adll = ui_mask_bit;
	}

	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("==============================\n"));
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("Test type %d (0-tx, 1-rx)\n", test_type));

	for (if_id = start_if; if_id <= end_if; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_cnt = startpup; bus_cnt < endpup; bus_cnt++) {
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, reg_addr,
				      &ui_prev_adll));

			for (adll_val = start_adll; adll_val <= end_adll;
			     adll_val++) {
				if (is_modify_adll == 1) {
					CHECK_STATUS(ddr3_tip_bus_read_modify_write
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id, bus_cnt,
						      DDR_PHY_DATA, reg_addr,
						      adll_val, ui_mask_bit));
				}
			}
			if (is_modify_adll == 1) {
				CHECK_STATUS(ddr3_tip_bus_write
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id, ACCESS_TYPE_UNICAST,
					      bus_cnt, DDR_PHY_DATA, reg_addr,
					      ui_prev_adll));
			}
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
	}

	return MV_OK;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Sweep validation
 */
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
			    u32 mode)
{
	u32 pup = 0, start_pup = 0, end_pup = 0;
	u32 adll = 0;
	u32 res[MAX_INTERFACE_NUM] = { 0 };
	int if_id = 0;
	u32 adll_value = 0;
	int reg = (direction == 0) ? WRITE_CENTRALIZATION_PHY_REG :
		READ_CENTRALIZATION_PHY_REG;
	enum hws_access_type pup_access;
	u32 cs;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (mode == 1) {
		/* per pup */
		start_pup = 0;
		end_pup = tm->num_of_bus_per_interface - 1;
		pup_access = ACCESS_TYPE_UNICAST;
	} else {
		start_pup = 0;
		end_pup = 0;
		pup_access = ACCESS_TYPE_MULTICAST;
	}

	for (cs = 0; cs < max_cs; cs++) {
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					ctrl_sweepres[adll][if_id][pup] = 0;
				}
			}
		}

		for (adll = 0; adll < (MAX_INTERFACE_NUM * MAX_BUS_NUM); adll++)
			ctrl_adll[adll] = 0;
		/* Save DQS value(after algorithm run) */
		read_adll_value(ctrl_adll,
				(reg + (cs * CS_REGISTER_ADDR_OFFSET)),
				MASK_ALL_BITS);

		/*
		 * Sweep ADLL from 0:31 on all I/F on all Pup and perform
		 * BIST on each stage.
		 */
		for (pup = start_pup; pup <= end_pup; pup++) {
			for (adll = 0; adll < ADLL_LENGTH; adll++) {
				adll_value =
					(direction == 0) ? (adll * 2) : adll;
				CHECK_STATUS(ddr3_tip_bus_write
					     (dev_num, ACCESS_TYPE_MULTICAST, 0,
					      pup_access, pup, DDR_PHY_DATA,
					      reg + CS_REG_VALUE(cs),
					      adll_value));
				hws_ddr3_run_bist(dev_num, sweep_pattern, res,
						  cs);
				/* ddr3_tip_reset_fifo_ptr(dev_num); */
				for (if_id = 0;
				     if_id <= MAX_INTERFACE_NUM - 1;
				     if_id++) {
					VALIDATE_ACTIVE(tm->if_act_mask,
							if_id);
					ctrl_sweepres[adll][if_id][pup]
						= res[if_id];
					if (mode == 1) {
						CHECK_STATUS
							(ddr3_tip_bus_write
							 (dev_num,
							  ACCESS_TYPE_UNICAST,
							  if_id,
							  ACCESS_TYPE_UNICAST,
							  pup,
							  DDR_PHY_DATA,
							  reg + CS_REG_VALUE(cs),
							  ctrl_adll[if_id *
								    cs *
								    tm->num_of_bus_per_interface
								    + pup]));
					}
				}
			}
		}
		printf("Final, CS %d,%s, Sweep, Result, Adll,", cs,
		       ((direction == 0) ? "TX" : "RX"));
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			if (mode == 1) {
				for (pup = start_pup; pup <= end_pup; pup++) {
					VALIDATE_ACTIVE(tm->bus_act_mask, pup);
					printf("I/F%d-PHY%d , ", if_id, pup);
				}
			} else {
				printf("I/F%d , ", if_id);
			}
		}
		printf("\n");

		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			adll_value = (direction == 0) ? (adll * 2) : adll;
			printf("Final,%s, Sweep, Result, %d ,",
			       ((direction == 0) ? "TX" : "RX"), adll_value);
			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					printf("%d , ",
					       ctrl_sweepres[adll][if_id]
					       [pup]);
				}
			}
			printf("\n");
		}

		/*
		 * Write back to the phy the Rx DQS value, we store in
		 * the beginning.
		 */
		write_adll_value(ctrl_adll,
				 (reg + cs * CS_REGISTER_ADDR_OFFSET));
		/* print adll results */
		read_adll_value(ctrl_adll, (reg + cs * CS_REGISTER_ADDR_OFFSET),
				MASK_ALL_BITS);
		printf("%s, DQS, ADLL,,,", (direction == 0) ? "Tx" : "Rx");
		print_adll(dev_num, ctrl_adll);
	}
	ddr3_tip_reset_fifo_ptr(dev_num);

	return 0;
}
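
/* Print the topology map: per-interface frequency, speed bin, bus width, memory size, CAS values and per-bus CS/mirror/swap settings */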
void print_topology(struct hws_topology_map *topology_db)
{
	u32 ui, uj;

	printf("\tinterface_mask: 0x%x\n", topology_db->if_act_mask);
	printf("\tNum Bus: %d\n", topology_db->num_of_bus_per_interface);
	printf("\tbus_act_mask: 0x%x\n", topology_db->bus_act_mask);

	for (ui = 0; ui < MAX_INTERFACE_NUM; ui++) {
		VALIDATE_ACTIVE(topology_db->if_act_mask, ui);
		printf("\n\tInterface ID: %d\n", ui);
		printf("\t\tDDR Frequency: %s\n",
		       convert_freq(topology_db->
				    interface_params[ui].memory_freq));
		printf("\t\tSpeed_bin: %d\n",
		       topology_db->interface_params[ui].speed_bin_index);
		printf("\t\tBus_width: %d\n",
		       (4 << topology_db->interface_params[ui].bus_width));
		printf("\t\tMem_size: %s\n",
		       convert_mem_size(topology_db->
					interface_params[ui].memory_size));
		printf("\t\tCAS-WL: %d\n",
		       topology_db->interface_params[ui].cas_wl);
		printf("\t\tCAS-L: %d\n",
		       topology_db->interface_params[ui].cas_l);
		printf("\t\tTemperature: %d\n",
		       topology_db->interface_params[ui].interface_temp);
		printf("\n");
		for (uj = 0; uj < 4; uj++) {
			printf("\t\tBus %d parameters- CS Mask: 0x%x\t", uj,
			       topology_db->interface_params[ui].
			       as_bus_params[uj].cs_bitmask);
			printf("Mirror: 0x%x\t",
			       topology_db->interface_params[ui].
			       as_bus_params[uj].mirror_enable_bitmask);
			printf("DQS Swap is %s \t",
			       (topology_db->
				interface_params[ui].as_bus_params[uj].
				is_dqs_swap == 1) ? "enabled" : "disabled");
			printf("Ck Swap:%s\t",
			       (topology_db->
				interface_params[ui].as_bus_params[uj].
				is_ck_swap == 1) ? "enabled" : "disabled");
			printf("\n");
		}
	}
}
#endif

/*
 * Execute XSB Test transaction (rd/wr/both)
 */
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
		 u32 read_type, u32 burst_length)
{
	u32 seq = 0, if_id = 0, addr, cnt;
	int ret = MV_OK, ret_tmp;
	u32 data_read[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		addr = mem_addr;
		for (cnt = 0; cnt <= burst_length; cnt++) {
			seq = (seq + 1) % 8;
			if (write_type != 0) {
				CHECK_STATUS(ddr3_tip_ext_write
					     (dev_num, if_id, addr, 1,
					      xsb_test_table[seq]));
			}
			if (read_type != 0) {
				CHECK_STATUS(ddr3_tip_ext_read
					     (dev_num, if_id, addr, 1,
					      data_read));
			}
			if ((read_type != 0) && (write_type != 0)) {
				ret_tmp =
					ddr3_tip_compare(if_id,
							 xsb_test_table[seq],
							 data_read,
							 0xff);
				addr += (EXT_ACCESS_BURST_LENGTH * 4);
				ret = (ret != MV_OK) ? ret : ret_tmp;
			}
		}
	}

	return ret;
}
#else /*EXCLUDE_SWITCH_DEBUG */
u32 rl_version = 1; /* 0 - old RL machine */
u32 vref = 0x4;
u32 start_xsb_offset = 0;
u8 cs_mask_reg[] = {
	0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
		 u32 read_type, u32 burst_length)
{
	return MV_OK;
}
#endif