/* ddr3_debug.c */

/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

u8 is_reg_dump = 0;
u8 debug_pbs = DEBUG_LEVEL_ERROR;
/*
 * API to change the debug log levels from outside the library
 */
  18. #ifndef SILENT_LIB
  19. /* Debug flags for other Training modules */
  20. u8 debug_training_static = DEBUG_LEVEL_ERROR;
  21. u8 debug_training = DEBUG_LEVEL_ERROR;
  22. u8 debug_leveling = DEBUG_LEVEL_ERROR;
  23. u8 debug_centralization = DEBUG_LEVEL_ERROR;
  24. u8 debug_training_ip = DEBUG_LEVEL_ERROR;
  25. u8 debug_training_bist = DEBUG_LEVEL_ERROR;
  26. u8 debug_training_hw_alg = DEBUG_LEVEL_ERROR;
  27. u8 debug_training_access = DEBUG_LEVEL_ERROR;
  28. u8 debug_training_a38x = DEBUG_LEVEL_ERROR;
  29. void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
  30. {
  31. switch (block) {
  32. case DEBUG_BLOCK_STATIC:
  33. debug_training_static = level;
  34. break;
  35. case DEBUG_BLOCK_TRAINING_MAIN:
  36. debug_training = level;
  37. break;
  38. case DEBUG_BLOCK_LEVELING:
  39. debug_leveling = level;
  40. break;
  41. case DEBUG_BLOCK_CENTRALIZATION:
  42. debug_centralization = level;
  43. break;
  44. case DEBUG_BLOCK_PBS:
  45. debug_pbs = level;
  46. break;
  47. case DEBUG_BLOCK_ALG:
  48. debug_training_hw_alg = level;
  49. break;
  50. case DEBUG_BLOCK_DEVICE:
  51. debug_training_a38x = level;
  52. break;
  53. case DEBUG_BLOCK_ACCESS:
  54. debug_training_access = level;
  55. break;
  56. case DEBUG_STAGES_REG_DUMP:
  57. if (level == DEBUG_LEVEL_TRACE)
  58. is_reg_dump = 1;
  59. else
  60. is_reg_dump = 0;
  61. break;
  62. case DEBUG_BLOCK_ALL:
  63. default:
  64. debug_training_static = level;
  65. debug_training = level;
  66. debug_leveling = level;
  67. debug_centralization = level;
  68. debug_pbs = level;
  69. debug_training_hw_alg = level;
  70. debug_training_access = level;
  71. debug_training_a38x = level;
  72. }
  73. }
  74. #else
  75. void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
  76. {
  77. return;
  78. }
  79. #endif
  80. struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
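/* Run-time validation and sweep controls (see ddr3_tip_set_atr() and hws_ddr3_tip_sweep_test()) */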
  81. u8 is_default_centralization = 0;
  82. u8 is_tune_result = 0;
  83. u8 is_validate_window_per_if = 0;
  84. u8 is_validate_window_per_pup = 0;
  85. u8 sweep_cnt = 1;
  86. u32 is_bist_reset_bit = 1;
  87. static struct hws_xsb_info xsb_info[HWS_MAX_DEVICE_NUM];
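/* Per-device XSB info registered via ddr3_tip_register_xsb_info() */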
  88. /*
  89. * Dump Dunit & Phy registers
  90. */
  91. int ddr3_tip_reg_dump(u32 dev_num)
  92. {
  93. u32 if_id, reg_addr, data_value, bus_id;
  94. u32 read_data[MAX_INTERFACE_NUM];
  95. struct hws_topology_map *tm = ddr3_get_topology_map();
  96. printf("-- dunit registers --\n");
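/* Dump the Dunit register space (0x1400-0x19ec), one column per active interface */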
  97. for (reg_addr = 0x1400; reg_addr < 0x19f0; reg_addr += 4) {
  98. printf("0x%x ", reg_addr);
  99. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  100. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  101. CHECK_STATUS(ddr3_tip_if_read
  102. (dev_num, ACCESS_TYPE_UNICAST,
  103. if_id, reg_addr, read_data,
  104. MASK_ALL_BITS));
  105. printf("0x%x ", read_data[if_id]);
  106. }
  107. printf("\n");
  108. }
  109. printf("-- Phy registers --\n");
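/* For each PHY register address, print the DATA PHY values and then the CONTROL PHY values for every active bus */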
  110. for (reg_addr = 0; reg_addr <= 0xff; reg_addr++) {
  111. printf("0x%x ", reg_addr);
  112. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  113. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  114. for (bus_id = 0;
  115. bus_id < tm->num_of_bus_per_interface;
  116. bus_id++) {
  117. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  118. CHECK_STATUS(ddr3_tip_bus_read
  119. (dev_num, if_id,
  120. ACCESS_TYPE_UNICAST, bus_id,
  121. DDR_PHY_DATA, reg_addr,
  122. &data_value));
  123. printf("0x%x ", data_value);
  124. }
  125. for (bus_id = 0;
  126. bus_id < tm->num_of_bus_per_interface;
  127. bus_id++) {
  128. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  129. CHECK_STATUS(ddr3_tip_bus_read
  130. (dev_num, if_id,
  131. ACCESS_TYPE_UNICAST, bus_id,
  132. DDR_PHY_CONTROL, reg_addr,
  133. &data_value));
  134. printf("0x%x ", data_value);
  135. }
  136. }
  137. printf("\n");
  138. }
  139. return MV_OK;
  140. }
  141. /*
  142. * Register access func registration
  143. */
  144. int ddr3_tip_init_config_func(u32 dev_num,
  145. struct hws_tip_config_func_db *config_func)
  146. {
  147. if (config_func == NULL)
  148. return MV_BAD_PARAM;
  149. memcpy(&config_func_info[dev_num], config_func,
  150. sizeof(struct hws_tip_config_func_db));
  151. return MV_OK;
  152. }
  153. /*
  154. * Read training result table
  155. */
  156. int hws_ddr3_tip_read_training_result(
  157. u32 dev_num, enum hws_result result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM])
  158. {
  159. dev_num = dev_num;
  160. if (result == NULL)
  161. return MV_BAD_PARAM;
/* result[] decays to a pointer here, so size the copy from the source table dimensions */
memcpy(result, training_result,
       sizeof(enum hws_result) * MAX_STAGE_LIMIT * MAX_INTERFACE_NUM);
  163. return MV_OK;
  164. }
  165. /*
  166. * Get training result info pointer
  167. */
  168. enum hws_result *ddr3_tip_get_result_ptr(u32 stage)
  169. {
  170. return training_result[stage];
  171. }
  172. /*
  173. * Device info read
  174. */
  175. int ddr3_tip_get_device_info(u32 dev_num, struct ddr3_device_info *info_ptr)
  176. {
  177. if (config_func_info[dev_num].tip_get_device_info_func != NULL) {
  178. return config_func_info[dev_num].
  179. tip_get_device_info_func((u8) dev_num, info_ptr);
  180. }
  181. return MV_FAIL;
  182. }
  183. #ifndef EXCLUDE_SWITCH_DEBUG
  184. /*
  185. * Convert freq to character string
  186. */
  187. static char *convert_freq(enum hws_ddr_freq freq)
  188. {
  189. switch (freq) {
  190. case DDR_FREQ_LOW_FREQ:
  191. return "DDR_FREQ_LOW_FREQ";
  192. case DDR_FREQ_400:
  193. return "400";
  194. case DDR_FREQ_533:
  195. return "533";
  196. case DDR_FREQ_667:
  197. return "667";
  198. case DDR_FREQ_800:
  199. return "800";
  200. case DDR_FREQ_933:
  201. return "933";
  202. case DDR_FREQ_1066:
  203. return "1066";
  204. case DDR_FREQ_311:
  205. return "311";
  206. case DDR_FREQ_333:
  207. return "333";
  208. case DDR_FREQ_467:
  209. return "467";
  210. case DDR_FREQ_850:
  211. return "850";
  212. case DDR_FREQ_900:
  213. return "900";
  214. case DDR_FREQ_360:
  215. return "DDR_FREQ_360";
  216. case DDR_FREQ_1000:
  217. return "DDR_FREQ_1000";
  218. default:
  219. return "Unknown Frequency";
  220. }
  221. }
  222. /*
  223. * Convert device ID to character string
  224. */
  225. static char *convert_dev_id(u32 dev_id)
  226. {
  227. switch (dev_id) {
  228. case 0x6800:
  229. return "A38xx";
  230. case 0x6900:
  231. return "A39XX";
  232. case 0xf400:
  233. return "AC3";
  234. case 0xfc00:
  235. return "BC2";
  236. default:
  237. return "Unknown Device";
  238. }
  239. }
/*
 * Convert memory size code to character string
 */
  243. static char *convert_mem_size(u32 dev_id)
  244. {
  245. switch (dev_id) {
  246. case 0:
  247. return "512 MB";
  248. case 1:
  249. return "1 GB";
  250. case 2:
  251. return "2 GB";
  252. case 3:
  253. return "4 GB";
  254. case 4:
  255. return "8 GB";
  256. default:
  257. return "wrong mem size";
  258. }
  259. }
  260. int print_device_info(u8 dev_num)
  261. {
  262. struct ddr3_device_info info_ptr;
  263. struct hws_topology_map *tm = ddr3_get_topology_map();
  264. CHECK_STATUS(ddr3_tip_get_device_info(dev_num, &info_ptr));
  265. printf("=== DDR setup START===\n");
  266. printf("\tDevice ID: %s\n", convert_dev_id(info_ptr.device_id));
  267. printf("\tDDR3 CK delay: %d\n", info_ptr.ck_delay);
  268. print_topology(tm);
  269. printf("=== DDR setup END===\n");
  270. return MV_OK;
  271. }
  272. void hws_ddr3_tip_sweep_test(int enable)
  273. {
  274. if (enable) {
  275. is_validate_window_per_if = 1;
  276. is_validate_window_per_pup = 1;
  277. debug_training = DEBUG_LEVEL_TRACE;
  278. } else {
  279. is_validate_window_per_if = 0;
  280. is_validate_window_per_pup = 0;
  281. }
  282. }
  283. #endif
  284. char *ddr3_tip_convert_tune_result(enum hws_result tune_result)
  285. {
  286. switch (tune_result) {
  287. case TEST_FAILED:
  288. return "FAILED";
  289. case TEST_SUCCESS:
  290. return "PASS";
  291. case NO_TEST_DONE:
  292. return "NOT COMPLETED";
  293. default:
  294. return "Un-KNOWN";
  295. }
  296. }
  297. /*
  298. * Print log info
  299. */
  300. int ddr3_tip_print_log(u32 dev_num, u32 mem_addr)
  301. {
  302. u32 if_id = 0;
  303. struct hws_topology_map *tm = ddr3_get_topology_map();
  304. mem_addr = mem_addr;
  305. #ifndef EXCLUDE_SWITCH_DEBUG
  306. if ((is_validate_window_per_if != 0) ||
  307. (is_validate_window_per_pup != 0)) {
  308. u32 is_pup_log = 0;
  309. enum hws_ddr_freq freq;
  310. freq = tm->interface_params[first_active_if].memory_freq;
  311. is_pup_log = (is_validate_window_per_pup != 0) ? 1 : 0;
  312. printf("===VALIDATE WINDOW LOG START===\n");
  313. printf("DDR Frequency: %s ======\n", convert_freq(freq));
  314. /* print sweep windows */
  315. ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 1, is_pup_log);
  316. ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 0, is_pup_log);
  317. ddr3_tip_print_all_pbs_result(dev_num);
  318. ddr3_tip_print_wl_supp_result(dev_num);
  319. printf("===VALIDATE WINDOW LOG END ===\n");
  320. CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));
  321. ddr3_tip_reg_dump(dev_num);
  322. }
  323. #endif
  324. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  325. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  326. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  327. ("IF %d Status:\n", if_id));
  328. if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
  329. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  330. ("\tInit Controller: %s\n",
  331. ddr3_tip_convert_tune_result
  332. (training_result[INIT_CONTROLLER]
  333. [if_id])));
  334. }
  335. if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
  336. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  337. ("\tLow freq Config: %s\n",
  338. ddr3_tip_convert_tune_result
  339. (training_result[SET_LOW_FREQ]
  340. [if_id])));
  341. }
  342. if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
  343. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  344. ("\tLoad Pattern: %s\n",
  345. ddr3_tip_convert_tune_result
  346. (training_result[LOAD_PATTERN]
  347. [if_id])));
  348. }
  349. if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
  350. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  351. ("\tMedium freq Config: %s\n",
  352. ddr3_tip_convert_tune_result
  353. (training_result[SET_MEDIUM_FREQ]
  354. [if_id])));
  355. }
  356. if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
  357. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  358. ("\tWL: %s\n",
  359. ddr3_tip_convert_tune_result
  360. (training_result[WRITE_LEVELING]
  361. [if_id])));
  362. }
  363. if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
  364. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  365. ("\tLoad Pattern: %s\n",
  366. ddr3_tip_convert_tune_result
  367. (training_result[LOAD_PATTERN_2]
  368. [if_id])));
  369. }
  370. if (mask_tune_func & READ_LEVELING_MASK_BIT) {
  371. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  372. ("\tRL: %s\n",
  373. ddr3_tip_convert_tune_result
  374. (training_result[READ_LEVELING]
  375. [if_id])));
  376. }
  377. if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
  378. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  379. ("\tWL Supp: %s\n",
  380. ddr3_tip_convert_tune_result
  381. (training_result[WRITE_LEVELING_SUPP]
  382. [if_id])));
  383. }
  384. if (mask_tune_func & PBS_RX_MASK_BIT) {
  385. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  386. ("\tPBS RX: %s\n",
  387. ddr3_tip_convert_tune_result
  388. (training_result[PBS_RX]
  389. [if_id])));
  390. }
  391. if (mask_tune_func & PBS_TX_MASK_BIT) {
  392. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  393. ("\tPBS TX: %s\n",
  394. ddr3_tip_convert_tune_result
  395. (training_result[PBS_TX]
  396. [if_id])));
  397. }
  398. if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
  399. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  400. ("\tTarget freq Config: %s\n",
  401. ddr3_tip_convert_tune_result
  402. (training_result[SET_TARGET_FREQ]
  403. [if_id])));
  404. }
  405. if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
  406. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  407. ("\tWL TF: %s\n",
  408. ddr3_tip_convert_tune_result
  409. (training_result[WRITE_LEVELING_TF]
  410. [if_id])));
  411. }
  412. if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
  413. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  414. ("\tRL TF: %s\n",
  415. ddr3_tip_convert_tune_result
  416. (training_result[READ_LEVELING_TF]
  417. [if_id])));
  418. }
  419. if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
  420. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  421. ("\tWL TF Supp: %s\n",
  422. ddr3_tip_convert_tune_result
  423. (training_result
  424. [WRITE_LEVELING_SUPP_TF]
  425. [if_id])));
  426. }
  427. if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
  428. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  429. ("\tCentr RX: %s\n",
  430. ddr3_tip_convert_tune_result
  431. (training_result[CENTRALIZATION_RX]
  432. [if_id])));
  433. }
  434. if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
  435. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  436. ("\tVREF_CALIBRATION: %s\n",
  437. ddr3_tip_convert_tune_result
  438. (training_result[VREF_CALIBRATION]
  439. [if_id])));
  440. }
  441. if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
  442. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  443. ("\tCentr TX: %s\n",
  444. ddr3_tip_convert_tune_result
  445. (training_result[CENTRALIZATION_TX]
  446. [if_id])));
  447. }
  448. }
  449. return MV_OK;
  450. }
  451. /*
  452. * Print stability log info
  453. */
  454. int ddr3_tip_print_stability_log(u32 dev_num)
  455. {
  456. u8 if_id = 0, csindex = 0, bus_id = 0, idx = 0;
  457. u32 reg_data;
  458. u32 read_data[MAX_INTERFACE_NUM];
  459. u32 max_cs = hws_ddr3_tip_max_cs_get();
  460. struct hws_topology_map *tm = ddr3_get_topology_map();
  461. /* Title print */
  462. for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
  463. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  464. printf("Title: I/F# , Tj, Calibration_n0, Calibration_p0, Calibration_n1, Calibration_p1, Calibration_n2, Calibration_p2,");
  465. for (csindex = 0; csindex < max_cs; csindex++) {
  466. printf("CS%d , ", csindex);
  467. printf("\n");
  468. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  469. printf("VWTx, VWRx, WL_tot, WL_ADLL, WL_PH, RL_Tot, RL_ADLL, RL_PH, RL_Smp, Cen_tx, Cen_rx, Vref, DQVref,");
  470. printf("\t\t");
  471. for (idx = 0; idx < 11; idx++)
  472. printf("PBSTx-Pad%d,", idx);
  473. printf("\t\t");
  474. for (idx = 0; idx < 11; idx++)
  475. printf("PBSRx-Pad%d,", idx);
  476. }
  477. }
  478. printf("\n");
  479. /* Data print */
  480. for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
  481. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  482. printf("Data: %d,%d,", if_id,
  483. (config_func_info[dev_num].tip_get_temperature != NULL)
  484. ? (config_func_info[dev_num].
  485. tip_get_temperature(dev_num)) : (0));
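/*
 * Tj comes from the registered temperature callback; the calibration
 * n/p fields below are decoded from registers 0x14c8, 0x17c8 and 0x1dc8.
 */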
  486. CHECK_STATUS(ddr3_tip_if_read
  487. (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14c8,
  488. read_data, MASK_ALL_BITS));
  489. printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
  490. ((read_data[if_id] & 0xfc00) >> 10));
  491. CHECK_STATUS(ddr3_tip_if_read
  492. (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17c8,
  493. read_data, MASK_ALL_BITS));
  494. printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
  495. ((read_data[if_id] & 0xfc00) >> 10));
  496. CHECK_STATUS(ddr3_tip_if_read
  497. (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1dc8,
  498. read_data, MASK_ALL_BITS));
  499. printf("%d,%d,", ((read_data[if_id] & 0x3f0000) >> 16),
  500. ((read_data[if_id] & 0xfc00000) >> 22));
  501. for (csindex = 0; csindex < max_cs; csindex++) {
  502. printf("CS%d , ", csindex);
  503. for (bus_id = 0; bus_id < MAX_BUS_NUM; bus_id++) {
  504. printf("\n");
  505. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  506. ddr3_tip_bus_read(dev_num, if_id,
  507. ACCESS_TYPE_UNICAST,
  508. bus_id, DDR_PHY_DATA,
  509. RESULT_DB_PHY_REG_ADDR +
  510. csindex, &reg_data);
  511. printf("%d,%d,", (reg_data & 0x1f),
  512. ((reg_data & 0x3e0) >> 5));
  513. /* WL */
  514. ddr3_tip_bus_read(dev_num, if_id,
  515. ACCESS_TYPE_UNICAST,
  516. bus_id, DDR_PHY_DATA,
  517. WL_PHY_REG +
  518. csindex * 4, &reg_data);
  519. printf("%d,%d,%d,",
  520. (reg_data & 0x1f) +
  521. ((reg_data & 0x1c0) >> 6) * 32,
  522. (reg_data & 0x1f),
  523. (reg_data & 0x1c0) >> 6);
  524. /* RL */
  525. CHECK_STATUS(ddr3_tip_if_read
  526. (dev_num, ACCESS_TYPE_UNICAST,
  527. if_id,
  528. READ_DATA_SAMPLE_DELAY,
  529. read_data, MASK_ALL_BITS));
  530. read_data[if_id] =
  531. (read_data[if_id] &
  532. (0xf << (4 * csindex))) >>
  533. (4 * csindex);
  534. ddr3_tip_bus_read(dev_num, if_id,
  535. ACCESS_TYPE_UNICAST, bus_id,
  536. DDR_PHY_DATA,
  537. RL_PHY_REG + csindex * 4,
  538. &reg_data);
  539. printf("%d,%d,%d,%d,",
  540. (reg_data & 0x1f) +
  541. ((reg_data & 0x1c0) >> 6) * 32 +
  542. read_data[if_id] * 64,
  543. (reg_data & 0x1f),
  544. ((reg_data & 0x1c0) >> 6),
  545. read_data[if_id]);
  546. /* Centralization */
  547. ddr3_tip_bus_read(dev_num, if_id,
  548. ACCESS_TYPE_UNICAST, bus_id,
  549. DDR_PHY_DATA,
  550. WRITE_CENTRALIZATION_PHY_REG
  551. + csindex * 4, &reg_data);
  552. printf("%d,", (reg_data & 0x3f));
  553. ddr3_tip_bus_read(dev_num, if_id,
  554. ACCESS_TYPE_UNICAST, bus_id,
  555. DDR_PHY_DATA,
  556. READ_CENTRALIZATION_PHY_REG
  557. + csindex * 4, &reg_data);
  558. printf("%d,", (reg_data & 0x1f));
  559. /* Vref */
  560. ddr3_tip_bus_read(dev_num, if_id,
  561. ACCESS_TYPE_UNICAST, bus_id,
  562. DDR_PHY_DATA,
  563. PAD_CONFIG_PHY_REG,
  564. &reg_data);
  565. printf("%d,", (reg_data & 0x7));
  566. /* DQVref */
  567. /* Need to add the Read Function from device */
  568. printf("%d,", 0);
  569. printf("\t\t");
  570. for (idx = 0; idx < 11; idx++) {
  571. ddr3_tip_bus_read(dev_num, if_id,
  572. ACCESS_TYPE_UNICAST,
  573. bus_id, DDR_PHY_DATA,
  574. 0xd0 +
  575. 12 * csindex +
  576. idx, &reg_data);
  577. printf("%d,", (reg_data & 0x3f));
  578. }
  579. printf("\t\t");
  580. for (idx = 0; idx < 11; idx++) {
  581. ddr3_tip_bus_read(dev_num, if_id,
  582. ACCESS_TYPE_UNICAST,
  583. bus_id, DDR_PHY_DATA,
  584. 0x10 +
  585. 16 * csindex +
  586. idx, &reg_data);
  587. printf("%d,", (reg_data & 0x3f));
  588. }
  589. printf("\t\t");
  590. for (idx = 0; idx < 11; idx++) {
  591. ddr3_tip_bus_read(dev_num, if_id,
  592. ACCESS_TYPE_UNICAST,
  593. bus_id, DDR_PHY_DATA,
  594. 0x50 +
  595. 16 * csindex +
  596. idx, &reg_data);
  597. printf("%d,", (reg_data & 0x3f));
  598. }
  599. }
  600. }
  601. }
  602. printf("\n");
  603. return MV_OK;
  604. }
  605. /*
  606. * Register XSB information
  607. */
  608. int ddr3_tip_register_xsb_info(u32 dev_num, struct hws_xsb_info *xsb_info_table)
  609. {
  610. memcpy(&xsb_info[dev_num], xsb_info_table, sizeof(struct hws_xsb_info));
  611. return MV_OK;
  612. }
  613. /*
  614. * Read ADLL Value
  615. */
  616. int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  617. int reg_addr, u32 mask)
  618. {
  619. u32 data_value;
  620. u32 if_id = 0, bus_id = 0;
  621. u32 dev_num = 0;
  622. struct hws_topology_map *tm = ddr3_get_topology_map();
/*
 * Multi CS support - reg_addr is calculated in the calling function
 * with the CS offset
 */
  627. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  628. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  629. for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
  630. bus_id++) {
  631. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  632. CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
  633. ACCESS_TYPE_UNICAST,
  634. bus_id,
  635. DDR_PHY_DATA, reg_addr,
  636. &data_value));
  637. pup_values[if_id *
  638. tm->num_of_bus_per_interface + bus_id] =
  639. data_value & mask;
  640. }
  641. }
  642. return 0;
  643. }
  644. /*
  645. * Write ADLL Value
  646. */
  647. int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
  648. int reg_addr)
  649. {
  650. u32 if_id = 0, bus_id = 0;
  651. u32 dev_num = 0, data;
  652. struct hws_topology_map *tm = ddr3_get_topology_map();
/*
 * Multi CS support - reg_addr is calculated in the calling function
 * with the CS offset
 */
  657. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  658. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  659. for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
  660. bus_id++) {
  661. VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
  662. data = pup_values[if_id *
  663. tm->num_of_bus_per_interface +
  664. bus_id];
  665. CHECK_STATUS(ddr3_tip_bus_write(dev_num,
  666. ACCESS_TYPE_UNICAST,
  667. if_id,
  668. ACCESS_TYPE_UNICAST,
  669. bus_id, DDR_PHY_DATA,
  670. reg_addr, data));
  671. }
  672. }
  673. return 0;
  674. }
  675. #ifndef EXCLUDE_SWITCH_DEBUG
  676. u32 rl_version = 1; /* 0 - old RL machine */
  677. struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
  678. u32 start_xsb_offset = 0;
  679. u8 is_rl_old = 0;
  680. u8 is_freq_old = 0;
  681. u8 is_dfs_disabled = 0;
  682. u32 default_centrlization_value = 0x12;
  683. u32 vref = 0x4;
  684. u32 activate_select_before_run_alg = 1, activate_deselect_after_run_alg = 1,
  685. rl_test = 0, reset_read_fifo = 0;
  686. int debug_acc = 0;
  687. u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
  688. u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
  689. u8 cs_mask_reg[] = {
  690. 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  691. };
  692. u32 xsb_test_table[][8] = {
  693. {0x00000000, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555,
  694. 0x66666666, 0x77777777},
  695. {0x88888888, 0x99999999, 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd,
  696. 0xeeeeeeee, 0xffffffff},
  697. {0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
  698. 0x00000000, 0xffffffff},
  699. {0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
  700. 0x00000000, 0xffffffff},
  701. {0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
  702. 0x00000000, 0xffffffff},
  703. {0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
  704. 0x00000000, 0xffffffff},
  705. {0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
  706. 0xffffffff, 0xffffffff},
  707. {0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000,
  708. 0x00000000, 0x00000000},
  709. {0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
  710. 0xffffffff, 0xffffffff}
  711. };
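/* Data patterns written and compared by run_xsb_test() below */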
  712. static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr);
  713. int ddr3_tip_print_adll(void)
  714. {
  715. u32 bus_cnt = 0, if_id, data_p1, data_p2, ui_data3, dev_num = 0;
  716. struct hws_topology_map *tm = ddr3_get_topology_map();
  717. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  718. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  719. for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
  720. bus_cnt++) {
  721. VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
  722. CHECK_STATUS(ddr3_tip_bus_read
  723. (dev_num, if_id,
  724. ACCESS_TYPE_UNICAST, bus_cnt,
  725. DDR_PHY_DATA, 0x1, &data_p1));
  726. CHECK_STATUS(ddr3_tip_bus_read
  727. (dev_num, if_id, ACCESS_TYPE_UNICAST,
  728. bus_cnt, DDR_PHY_DATA, 0x2, &data_p2));
  729. CHECK_STATUS(ddr3_tip_bus_read
  730. (dev_num, if_id, ACCESS_TYPE_UNICAST,
  731. bus_cnt, DDR_PHY_DATA, 0x3, &ui_data3));
  732. DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
  733. (" IF %d bus_cnt %d phy_reg_1_data 0x%x phy_reg_2_data 0x%x phy_reg_3_data 0x%x\n",
  734. if_id, bus_cnt, data_p1, data_p2,
  735. ui_data3));
  736. }
  737. }
  738. return MV_OK;
  739. }
  740. /*
  741. * Set attribute value
  742. */
  743. int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value)
  744. {
  745. int ret;
  746. u32 *ptr_flag = NULL;
  747. ret = ddr3_tip_access_atr(dev_num, flag_id, value, &ptr_flag);
  748. if (ptr_flag != NULL) {
  749. printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x (was 0x%x)\n",
  750. flag_id, value, *ptr_flag);
  751. *ptr_flag = value;
  752. } else {
  753. printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x\n",
  754. flag_id, value);
  755. }
  756. return ret;
  757. }
  758. /*
  759. * Access attribute
  760. */
  761. static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr)
  762. {
  763. u32 tmp_val = 0, if_id = 0, pup_id = 0;
  764. struct hws_topology_map *tm = ddr3_get_topology_map();
  765. dev_num = dev_num;
  766. *ptr = NULL;
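/* Map flag_id to the address of the matching tuning global; IDs handled by value only leave *ptr NULL */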
  767. switch (flag_id) {
  768. case 0:
  769. *ptr = (u32 *)&(tm->if_act_mask);
  770. break;
  771. case 0x1:
  772. *ptr = (u32 *)&mask_tune_func;
  773. break;
  774. case 0x2:
  775. *ptr = (u32 *)&low_freq;
  776. break;
  777. case 0x3:
  778. *ptr = (u32 *)&medium_freq;
  779. break;
  780. case 0x4:
  781. *ptr = (u32 *)&generic_init_controller;
  782. break;
  783. case 0x5:
  784. *ptr = (u32 *)&rl_version;
  785. break;
  786. case 0x8:
  787. *ptr = (u32 *)&start_xsb_offset;
  788. break;
  789. case 0x20:
  790. *ptr = (u32 *)&is_rl_old;
  791. break;
  792. case 0x21:
  793. *ptr = (u32 *)&is_freq_old;
  794. break;
  795. case 0x23:
  796. *ptr = (u32 *)&is_dfs_disabled;
  797. break;
  798. case 0x24:
  799. *ptr = (u32 *)&is_pll_before_init;
  800. break;
  801. case 0x25:
  802. *ptr = (u32 *)&is_adll_calib_before_init;
  803. break;
  804. #ifdef STATIC_ALGO_SUPPORT
  805. case 0x26:
  806. *ptr = (u32 *)&(silicon_delay[0]);
  807. break;
  808. case 0x27:
  809. *ptr = (u32 *)&wl_debug_delay;
  810. break;
  811. #endif
  812. case 0x28:
  813. *ptr = (u32 *)&is_tune_result;
  814. break;
  815. case 0x29:
  816. *ptr = (u32 *)&is_validate_window_per_if;
  817. break;
  818. case 0x2a:
  819. *ptr = (u32 *)&is_validate_window_per_pup;
  820. break;
  821. case 0x30:
  822. *ptr = (u32 *)&sweep_cnt;
  823. break;
  824. case 0x31:
  825. *ptr = (u32 *)&is_bist_reset_bit;
  826. break;
  827. case 0x32:
  828. *ptr = (u32 *)&is_dfs_in_init;
  829. break;
  830. case 0x33:
  831. *ptr = (u32 *)&p_finger;
  832. break;
  833. case 0x34:
  834. *ptr = (u32 *)&n_finger;
  835. break;
  836. case 0x35:
  837. *ptr = (u32 *)&init_freq;
  838. break;
  839. case 0x36:
  840. *ptr = (u32 *)&(freq_val[DDR_FREQ_LOW_FREQ]);
  841. break;
  842. case 0x37:
  843. *ptr = (u32 *)&start_pattern;
  844. break;
  845. case 0x38:
  846. *ptr = (u32 *)&end_pattern;
  847. break;
  848. case 0x39:
  849. *ptr = (u32 *)&phy_reg0_val;
  850. break;
  851. case 0x4a:
  852. *ptr = (u32 *)&phy_reg1_val;
  853. break;
  854. case 0x4b:
  855. *ptr = (u32 *)&phy_reg2_val;
  856. break;
  857. case 0x4c:
  858. *ptr = (u32 *)&phy_reg3_val;
  859. break;
  860. case 0x4e:
  861. *ptr = (u32 *)&sweep_pattern;
  862. break;
  863. case 0x50:
  864. *ptr = (u32 *)&is_rzq6;
  865. break;
  866. case 0x51:
  867. *ptr = (u32 *)&znri_data_phy_val;
  868. break;
  869. case 0x52:
  870. *ptr = (u32 *)&zpri_data_phy_val;
  871. break;
  872. case 0x53:
  873. *ptr = (u32 *)&finger_test;
  874. break;
  875. case 0x54:
  876. *ptr = (u32 *)&n_finger_start;
  877. break;
  878. case 0x55:
  879. *ptr = (u32 *)&n_finger_end;
  880. break;
  881. case 0x56:
  882. *ptr = (u32 *)&p_finger_start;
  883. break;
  884. case 0x57:
  885. *ptr = (u32 *)&p_finger_end;
  886. break;
  887. case 0x58:
  888. *ptr = (u32 *)&p_finger_step;
  889. break;
  890. case 0x59:
  891. *ptr = (u32 *)&n_finger_step;
  892. break;
  893. case 0x5a:
  894. *ptr = (u32 *)&znri_ctrl_phy_val;
  895. break;
  896. case 0x5b:
  897. *ptr = (u32 *)&zpri_ctrl_phy_val;
  898. break;
  899. case 0x5c:
  900. *ptr = (u32 *)&is_reg_dump;
  901. break;
  902. case 0x5d:
  903. *ptr = (u32 *)&vref;
  904. break;
  905. case 0x5e:
  906. *ptr = (u32 *)&mode2_t;
  907. break;
  908. case 0x5f:
  909. *ptr = (u32 *)&xsb_validate_type;
  910. break;
  911. case 0x60:
  912. *ptr = (u32 *)&xsb_validation_base_address;
  913. break;
  914. case 0x67:
  915. *ptr = (u32 *)&activate_select_before_run_alg;
  916. break;
  917. case 0x68:
  918. *ptr = (u32 *)&activate_deselect_after_run_alg;
  919. break;
  920. case 0x69:
  921. *ptr = (u32 *)&odt_additional;
  922. break;
  923. case 0x70:
  924. *ptr = (u32 *)&debug_mode;
  925. break;
  926. case 0x71:
  927. *ptr = (u32 *)&pbs_pattern;
  928. break;
  929. case 0x72:
  930. *ptr = (u32 *)&delay_enable;
  931. break;
  932. case 0x73:
  933. *ptr = (u32 *)&ck_delay;
  934. break;
  935. case 0x74:
  936. *ptr = (u32 *)&ck_delay_16;
  937. break;
  938. case 0x75:
  939. *ptr = (u32 *)&ca_delay;
  940. break;
  941. case 0x100:
  942. *ptr = (u32 *)&debug_dunit;
  943. break;
  944. case 0x101:
  945. debug_acc = (int)value;
  946. break;
  947. case 0x102:
  948. debug_training = (u8)value;
  949. break;
  950. case 0x103:
  951. debug_training_bist = (u8)value;
  952. break;
  953. case 0x104:
  954. debug_centralization = (u8)value;
  955. break;
  956. case 0x105:
  957. debug_training_ip = (u8)value;
  958. break;
  959. case 0x106:
  960. debug_leveling = (u8)value;
  961. break;
  962. case 0x107:
  963. debug_pbs = (u8)value;
  964. break;
  965. case 0x108:
  966. debug_training_static = (u8)value;
  967. break;
  968. case 0x109:
  969. debug_training_access = (u8)value;
  970. break;
  971. case 0x112:
  972. *ptr = &start_pattern;
  973. break;
  974. case 0x113:
  975. *ptr = &end_pattern;
  976. break;
  977. default:
  978. if ((flag_id >= 0x200) && (flag_id < 0x210)) {
  979. if_id = flag_id - 0x200;
  980. *ptr = (u32 *)&(tm->interface_params
  981. [if_id].memory_freq);
  982. } else if ((flag_id >= 0x210) && (flag_id < 0x220)) {
  983. if_id = flag_id - 0x210;
  984. *ptr = (u32 *)&(tm->interface_params
  985. [if_id].speed_bin_index);
  986. } else if ((flag_id >= 0x220) && (flag_id < 0x230)) {
  987. if_id = flag_id - 0x220;
  988. *ptr = (u32 *)&(tm->interface_params
  989. [if_id].bus_width);
  990. } else if ((flag_id >= 0x230) && (flag_id < 0x240)) {
  991. if_id = flag_id - 0x230;
  992. *ptr = (u32 *)&(tm->interface_params
  993. [if_id].memory_size);
  994. } else if ((flag_id >= 0x240) && (flag_id < 0x250)) {
  995. if_id = flag_id - 0x240;
  996. *ptr = (u32 *)&(tm->interface_params
  997. [if_id].cas_l);
  998. } else if ((flag_id >= 0x250) && (flag_id < 0x260)) {
  999. if_id = flag_id - 0x250;
  1000. *ptr = (u32 *)&(tm->interface_params
  1001. [if_id].cas_wl);
  1002. } else if ((flag_id >= 0x270) && (flag_id < 0x2cf)) {
  1003. if_id = (flag_id - 0x270) / MAX_BUS_NUM;
  1004. pup_id = (flag_id - 0x270) % MAX_BUS_NUM;
  1005. *ptr = (u32 *)&(tm->interface_params[if_id].
  1006. as_bus_params[pup_id].is_ck_swap);
  1007. } else if ((flag_id >= 0x2d0) && (flag_id < 0x32f)) {
  1008. if_id = (flag_id - 0x2d0) / MAX_BUS_NUM;
  1009. pup_id = (flag_id - 0x2d0) % MAX_BUS_NUM;
  1010. *ptr = (u32 *)&(tm->interface_params[if_id].
  1011. as_bus_params[pup_id].is_dqs_swap);
  1012. } else if ((flag_id >= 0x330) && (flag_id < 0x38f)) {
  1013. if_id = (flag_id - 0x330) / MAX_BUS_NUM;
  1014. pup_id = (flag_id - 0x330) % MAX_BUS_NUM;
  1015. *ptr = (u32 *)&(tm->interface_params[if_id].
  1016. as_bus_params[pup_id].cs_bitmask);
  1017. } else if ((flag_id >= 0x390) && (flag_id < 0x3ef)) {
  1018. if_id = (flag_id - 0x390) / MAX_BUS_NUM;
  1019. pup_id = (flag_id - 0x390) % MAX_BUS_NUM;
  1020. *ptr = (u32 *)&(tm->interface_params
  1021. [if_id].as_bus_params
  1022. [pup_id].mirror_enable_bitmask);
  1023. } else if ((flag_id >= 0x500) && (flag_id <= 0x50f)) {
  1024. tmp_val = flag_id - 0x320;
  1025. *ptr = (u32 *)&(clamp_tbl[tmp_val]);
  1026. } else {
  1027. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1028. ("flag_id out of boundary %d\n",
  1029. flag_id));
  1030. return MV_BAD_PARAM;
  1031. }
  1032. }
  1033. return MV_OK;
  1034. }
  1035. #ifndef EXCLUDE_SWITCH_DEBUG
  1036. /*
  1037. * Print ADLL
  1038. */
  1039. int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
  1040. {
  1041. u32 i, j;
  1042. struct hws_topology_map *tm = ddr3_get_topology_map();
  1043. dev_num = dev_num;
  1044. for (j = 0; j < tm->num_of_bus_per_interface; j++) {
  1045. VALIDATE_ACTIVE(tm->bus_act_mask, j);
  1046. for (i = 0; i < MAX_INTERFACE_NUM; i++) {
  1047. printf("%d ,",
  1048. adll[i * tm->num_of_bus_per_interface + j]);
  1049. }
  1050. }
  1051. printf("\n");
  1052. return MV_OK;
  1053. }
  1054. #endif
/* byte_index - test only byte 0, 1, 2 or 3; 0xff - test all bytes */
  1056. static u32 ddr3_tip_compare(u32 if_id, u32 *p_src, u32 *p_dst,
  1057. u32 byte_index)
  1058. {
  1059. u32 burst_cnt = 0, addr_offset, i_id;
  1060. int b_is_fail = 0;
  1061. addr_offset =
  1062. (byte_index ==
  1063. 0xff) ? (u32) 0xffffffff : (u32) (0xff << (byte_index * 8));
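/* addr_offset masks the byte under test; byte_index 0xff selects the whole 32-bit word */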
  1064. for (burst_cnt = 0; burst_cnt < EXT_ACCESS_BURST_LENGTH; burst_cnt++) {
  1065. if ((p_src[burst_cnt] & addr_offset) !=
  1066. (p_dst[burst_cnt] & addr_offset))
  1067. b_is_fail = 1;
  1068. }
  1069. if (b_is_fail == 1) {
  1070. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1071. ("IF %d exp: ", if_id));
  1072. for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
  1073. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1074. ("0x%8x ", p_src[i_id]));
  1075. }
  1076. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1077. ("\n_i_f %d rcv: ", if_id));
  1078. for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
  1079. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
  1080. ("(0x%8x ", p_dst[i_id]));
  1081. }
  1082. DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("\n "));
  1083. }
  1084. return b_is_fail;
  1085. }
  1086. /* test_type = 0-tx , 1-rx */
  1087. int ddr3_tip_sweep_test(u32 dev_num, u32 test_type,
  1088. u32 mem_addr, u32 is_modify_adll,
  1089. u32 start_if, u32 end_if, u32 startpup, u32 endpup)
  1090. {
  1091. u32 bus_cnt = 0, adll_val = 0, if_id, ui_prev_adll, ui_mask_bit,
  1092. end_adll, start_adll;
  1093. u32 reg_addr = 0;
  1094. struct hws_topology_map *tm = ddr3_get_topology_map();
  1095. mem_addr = mem_addr;
  1096. if (test_type == 0) {
  1097. reg_addr = 1;
  1098. ui_mask_bit = 0x3f;
  1099. start_adll = 0;
  1100. end_adll = ui_mask_bit;
  1101. } else {
  1102. reg_addr = 3;
  1103. ui_mask_bit = 0x1f;
  1104. start_adll = 0;
  1105. end_adll = ui_mask_bit;
  1106. }
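/* TX sweeps PHY data register 0x1 over a 6-bit ADLL range; RX sweeps register 0x3 over 5 bits */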
  1107. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  1108. ("==============================\n"));
  1109. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
  1110. ("Test type %d (0-tx, 1-rx)\n", test_type));
  1111. for (if_id = start_if; if_id <= end_if; if_id++) {
  1112. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  1113. for (bus_cnt = startpup; bus_cnt < endpup; bus_cnt++) {
  1114. CHECK_STATUS(ddr3_tip_bus_read
  1115. (dev_num, if_id, ACCESS_TYPE_UNICAST,
  1116. bus_cnt, DDR_PHY_DATA, reg_addr,
  1117. &ui_prev_adll));
  1118. for (adll_val = start_adll; adll_val <= end_adll;
  1119. adll_val++) {
  1120. if (is_modify_adll == 1) {
  1121. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  1122. (dev_num,
  1123. ACCESS_TYPE_UNICAST,
  1124. if_id, bus_cnt,
  1125. DDR_PHY_DATA, reg_addr,
  1126. adll_val, ui_mask_bit));
  1127. }
  1128. }
  1129. if (is_modify_adll == 1) {
  1130. CHECK_STATUS(ddr3_tip_bus_write
  1131. (dev_num, ACCESS_TYPE_UNICAST,
  1132. if_id, ACCESS_TYPE_UNICAST,
  1133. bus_cnt, DDR_PHY_DATA, reg_addr,
  1134. ui_prev_adll));
  1135. }
  1136. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
  1137. }
  1138. DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
  1139. }
  1140. return MV_OK;
  1141. }
  1142. #ifndef EXCLUDE_SWITCH_DEBUG
  1143. /*
  1144. * Sweep validation
  1145. */
  1146. int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
  1147. u32 mode)
  1148. {
  1149. u32 pup = 0, start_pup = 0, end_pup = 0;
  1150. u32 adll = 0;
  1151. u32 res[MAX_INTERFACE_NUM] = { 0 };
  1152. int if_id = 0;
  1153. u32 adll_value = 0;
  1154. int reg = (direction == 0) ? WRITE_CENTRALIZATION_PHY_REG :
  1155. READ_CENTRALIZATION_PHY_REG;
  1156. enum hws_access_type pup_access;
  1157. u32 cs;
  1158. u32 max_cs = hws_ddr3_tip_max_cs_get();
  1159. struct hws_topology_map *tm = ddr3_get_topology_map();
  1160. repeat_num = repeat_num;
  1161. if (mode == 1) {
  1162. /* per pup */
  1163. start_pup = 0;
  1164. end_pup = tm->num_of_bus_per_interface - 1;
  1165. pup_access = ACCESS_TYPE_UNICAST;
  1166. } else {
  1167. start_pup = 0;
  1168. end_pup = 0;
  1169. pup_access = ACCESS_TYPE_MULTICAST;
  1170. }
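/* mode 1 sweeps and restores each PHY (pup) individually; otherwise one multicast access covers all PHYs */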
  1171. for (cs = 0; cs < max_cs; cs++) {
  1172. for (adll = 0; adll < ADLL_LENGTH; adll++) {
  1173. for (if_id = 0;
  1174. if_id <= MAX_INTERFACE_NUM - 1;
  1175. if_id++) {
  1176. VALIDATE_ACTIVE
  1177. (tm->if_act_mask,
  1178. if_id);
  1179. for (pup = start_pup; pup <= end_pup; pup++) {
  1180. ctrl_sweepres[adll][if_id][pup] =
  1181. 0;
  1182. }
  1183. }
  1184. }
  1185. for (adll = 0; adll < (MAX_INTERFACE_NUM * MAX_BUS_NUM); adll++)
  1186. ctrl_adll[adll] = 0;
  1187. /* Save DQS value(after algorithm run) */
  1188. read_adll_value(ctrl_adll,
  1189. (reg + (cs * CS_REGISTER_ADDR_OFFSET)),
  1190. MASK_ALL_BITS);
  1191. /*
  1192. * Sweep ADLL from 0:31 on all I/F on all Pup and perform
  1193. * BIST on each stage.
  1194. */
  1195. for (pup = start_pup; pup <= end_pup; pup++) {
  1196. for (adll = 0; adll < ADLL_LENGTH; adll++) {
  1197. adll_value =
  1198. (direction == 0) ? (adll * 2) : adll;
  1199. CHECK_STATUS(ddr3_tip_bus_write
  1200. (dev_num, ACCESS_TYPE_MULTICAST, 0,
  1201. pup_access, pup, DDR_PHY_DATA,
  1202. reg + CS_REG_VALUE(cs),
  1203. adll_value));
  1204. hws_ddr3_run_bist(dev_num, sweep_pattern, res,
  1205. cs);
  1206. /* ddr3_tip_reset_fifo_ptr(dev_num); */
  1207. for (if_id = 0;
  1208. if_id <= MAX_INTERFACE_NUM - 1;
  1209. if_id++) {
  1210. VALIDATE_ACTIVE
  1211. (tm->if_act_mask,
  1212. if_id);
  1213. ctrl_sweepres[adll][if_id][pup]
  1214. = res[if_id];
  1215. if (mode == 1) {
  1216. CHECK_STATUS
  1217. (ddr3_tip_bus_write
  1218. (dev_num,
  1219. ACCESS_TYPE_UNICAST,
  1220. if_id,
  1221. ACCESS_TYPE_UNICAST,
  1222. pup,
  1223. DDR_PHY_DATA,
  1224. reg + CS_REG_VALUE(cs),
  1225. ctrl_adll[if_id *
  1226. cs *
  1227. tm->num_of_bus_per_interface
  1228. + pup]));
  1229. }
  1230. }
  1231. }
  1232. }
  1233. printf("Final, CS %d,%s, Sweep, Result, Adll,", cs,
  1234. ((direction == 0) ? "TX" : "RX"));
  1235. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  1236. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  1237. if (mode == 1) {
  1238. for (pup = start_pup; pup <= end_pup; pup++) {
  1239. VALIDATE_ACTIVE(tm->bus_act_mask, pup);
  1240. printf("I/F%d-PHY%d , ", if_id, pup);
  1241. }
  1242. } else {
  1243. printf("I/F%d , ", if_id);
  1244. }
  1245. }
  1246. printf("\n");
  1247. for (adll = 0; adll < ADLL_LENGTH; adll++) {
  1248. adll_value = (direction == 0) ? (adll * 2) : adll;
  1249. printf("Final,%s, Sweep, Result, %d ,",
  1250. ((direction == 0) ? "TX" : "RX"), adll_value);
  1251. for (if_id = 0;
  1252. if_id <= MAX_INTERFACE_NUM - 1;
  1253. if_id++) {
  1254. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  1255. for (pup = start_pup; pup <= end_pup; pup++) {
  1256. printf("%d , ",
  1257. ctrl_sweepres[adll][if_id]
  1258. [pup]);
  1259. }
  1260. }
  1261. printf("\n");
  1262. }
  1263. /*
  1264. * Write back to the phy the Rx DQS value, we store in
  1265. * the beginning.
  1266. */
  1267. write_adll_value(ctrl_adll,
  1268. (reg + cs * CS_REGISTER_ADDR_OFFSET));
  1269. /* print adll results */
  1270. read_adll_value(ctrl_adll, (reg + cs * CS_REGISTER_ADDR_OFFSET),
  1271. MASK_ALL_BITS);
  1272. printf("%s, DQS, ADLL,,,", (direction == 0) ? "Tx" : "Rx");
  1273. print_adll(dev_num, ctrl_adll);
  1274. }
  1275. ddr3_tip_reset_fifo_ptr(dev_num);
  1276. return 0;
  1277. }
  1278. void print_topology(struct hws_topology_map *topology_db)
  1279. {
  1280. u32 ui, uj;
  1281. printf("\tinterface_mask: 0x%x\n", topology_db->if_act_mask);
  1282. printf("\tNum Bus: %d\n", topology_db->num_of_bus_per_interface);
  1283. printf("\tbus_act_mask: 0x%x\n", topology_db->bus_act_mask);
  1284. for (ui = 0; ui < MAX_INTERFACE_NUM; ui++) {
  1285. VALIDATE_ACTIVE(topology_db->if_act_mask, ui);
  1286. printf("\n\tInterface ID: %d\n", ui);
  1287. printf("\t\tDDR Frequency: %s\n",
  1288. convert_freq(topology_db->
  1289. interface_params[ui].memory_freq));
  1290. printf("\t\tSpeed_bin: %d\n",
  1291. topology_db->interface_params[ui].speed_bin_index);
  1292. printf("\t\tBus_width: %d\n",
  1293. (4 << topology_db->interface_params[ui].bus_width));
  1294. printf("\t\tMem_size: %s\n",
  1295. convert_mem_size(topology_db->
  1296. interface_params[ui].memory_size));
  1297. printf("\t\tCAS-WL: %d\n",
  1298. topology_db->interface_params[ui].cas_wl);
  1299. printf("\t\tCAS-L: %d\n",
  1300. topology_db->interface_params[ui].cas_l);
  1301. printf("\t\tTemperature: %d\n",
  1302. topology_db->interface_params[ui].interface_temp);
  1303. printf("\n");
  1304. for (uj = 0; uj < 4; uj++) {
  1305. printf("\t\tBus %d parameters- CS Mask: 0x%x\t", uj,
  1306. topology_db->interface_params[ui].
  1307. as_bus_params[uj].cs_bitmask);
  1308. printf("Mirror: 0x%x\t",
  1309. topology_db->interface_params[ui].
  1310. as_bus_params[uj].mirror_enable_bitmask);
  1311. printf("DQS Swap is %s \t",
  1312. (topology_db->
  1313. interface_params[ui].as_bus_params[uj].
  1314. is_dqs_swap == 1) ? "enabled" : "disabled");
  1315. printf("Ck Swap:%s\t",
  1316. (topology_db->
  1317. interface_params[ui].as_bus_params[uj].
  1318. is_ck_swap == 1) ? "enabled" : "disabled");
  1319. printf("\n");
  1320. }
  1321. }
  1322. }
  1323. #endif
  1324. /*
  1325. * Execute XSB Test transaction (rd/wr/both)
  1326. */
  1327. int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
  1328. u32 read_type, u32 burst_length)
  1329. {
  1330. u32 seq = 0, if_id = 0, addr, cnt;
  1331. int ret = MV_OK, ret_tmp;
  1332. u32 data_read[MAX_INTERFACE_NUM];
  1333. struct hws_topology_map *tm = ddr3_get_topology_map();
  1334. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  1335. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  1336. addr = mem_addr;
  1337. for (cnt = 0; cnt <= burst_length; cnt++) {
  1338. seq = (seq + 1) % 8;
  1339. if (write_type != 0) {
  1340. CHECK_STATUS(ddr3_tip_ext_write
  1341. (dev_num, if_id, addr, 1,
  1342. xsb_test_table[seq]));
  1343. }
  1344. if (read_type != 0) {
  1345. CHECK_STATUS(ddr3_tip_ext_read
  1346. (dev_num, if_id, addr, 1,
  1347. data_read));
  1348. }
  1349. if ((read_type != 0) && (write_type != 0)) {
  1350. ret_tmp =
  1351. ddr3_tip_compare(if_id,
  1352. xsb_test_table[seq],
  1353. data_read,
  1354. 0xff);
  1355. addr += (EXT_ACCESS_BURST_LENGTH * 4);
  1356. ret = (ret != MV_OK) ? ret : ret_tmp;
  1357. }
  1358. }
  1359. }
  1360. return ret;
  1361. }
  1362. #else /*EXCLUDE_SWITCH_DEBUG */
  1363. u32 rl_version = 1; /* 0 - old RL machine */
  1364. u32 vref = 0x4;
  1365. u32 start_xsb_offset = 0;
  1366. u8 cs_mask_reg[] = {
  1367. 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
  1368. };
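/* XSB test becomes a no-op when EXCLUDE_SWITCH_DEBUG is defined */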
  1369. int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
  1370. u32 read_type, u32 burst_length)
  1371. {
  1372. return MV_OK;
  1373. }
  1374. #endif