ddr3_debug.c

/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

u8 is_reg_dump = 0;
u8 debug_pbs = DEBUG_LEVEL_ERROR;

/*
 * API to change debug flags from outside the library
 */
#ifndef SILENT_LIB
/* Debug flags for other Training modules */
u8 debug_training_static = DEBUG_LEVEL_ERROR;
u8 debug_training = DEBUG_LEVEL_ERROR;
u8 debug_leveling = DEBUG_LEVEL_ERROR;
u8 debug_centralization = DEBUG_LEVEL_ERROR;
u8 debug_training_ip = DEBUG_LEVEL_ERROR;
u8 debug_training_bist = DEBUG_LEVEL_ERROR;
u8 debug_training_hw_alg = DEBUG_LEVEL_ERROR;
u8 debug_training_access = DEBUG_LEVEL_ERROR;
u8 debug_training_a38x = DEBUG_LEVEL_ERROR;

void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
{
	switch (block) {
	case DEBUG_BLOCK_STATIC:
		debug_training_static = level;
		break;
	case DEBUG_BLOCK_TRAINING_MAIN:
		debug_training = level;
		break;
	case DEBUG_BLOCK_LEVELING:
		debug_leveling = level;
		break;
	case DEBUG_BLOCK_CENTRALIZATION:
		debug_centralization = level;
		break;
	case DEBUG_BLOCK_PBS:
		debug_pbs = level;
		break;
	case DEBUG_BLOCK_ALG:
		debug_training_hw_alg = level;
		break;
	case DEBUG_BLOCK_DEVICE:
		debug_training_a38x = level;
		break;
	case DEBUG_BLOCK_ACCESS:
		debug_training_access = level;
		break;
	case DEBUG_STAGES_REG_DUMP:
		if (level == DEBUG_LEVEL_TRACE)
			is_reg_dump = 1;
		else
			is_reg_dump = 0;
		break;
	case DEBUG_BLOCK_ALL:
	default:
		debug_training_static = level;
		debug_training = level;
		debug_leveling = level;
		debug_centralization = level;
		debug_pbs = level;
		debug_training_hw_alg = level;
		debug_training_access = level;
		debug_training_a38x = level;
	}
}
#else
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level)
{
	return;
}
#endif
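
/*
 * Example (illustrative only): raise all training blocks to full trace
 * output and also enable stage register dumps, which are controlled by a
 * separate block ID:
 *
 *	ddr3_hws_set_log_level(DEBUG_BLOCK_ALL, DEBUG_LEVEL_TRACE);
 *	ddr3_hws_set_log_level(DEBUG_STAGES_REG_DUMP, DEBUG_LEVEL_TRACE);
 */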

struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
u8 is_default_centralization = 0;
u8 is_tune_result = 0;
u8 is_validate_window_per_if = 0;
u8 is_validate_window_per_pup = 0;
u8 sweep_cnt = 1;
u32 is_bist_reset_bit = 1;
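
/*
 * Several of the flags above (is_tune_result, is_validate_window_per_if,
 * is_validate_window_per_pup, sweep_cnt, is_bist_reset_bit) can also be
 * changed at run time through ddr3_tip_set_atr() (flag IDs 0x28-0x2a, 0x30,
 * 0x31); hws_ddr3_tip_sweep_test() toggles the validate-window flags too.
 */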

static struct hws_xsb_info xsb_info[HWS_MAX_DEVICE_NUM];

/*
 * Dump Dunit & Phy registers
 */
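/*
 * Output format: one row per register address. Dunit registers cover
 * 0x1400-0x19ec in steps of 4, with one column per active interface;
 * PHY registers cover 0x00-0xff, with one data-PHY column followed by
 * one control-PHY column per active bus of each interface.
 */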
int ddr3_tip_reg_dump(u32 dev_num)
{
	u32 if_id, reg_addr, data_value, bus_id;
	u32 read_data[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	printf("-- dunit registers --\n");
	for (reg_addr = 0x1400; reg_addr < 0x19f0; reg_addr += 4) {
		printf("0x%x ", reg_addr);
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, reg_addr, read_data,
				      MASK_ALL_BITS));
			printf("0x%x ", read_data[if_id]);
		}
		printf("\n");
	}

	printf("-- Phy registers --\n");
	for (reg_addr = 0; reg_addr <= 0xff; reg_addr++) {
		printf("0x%x ", reg_addr);
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			for (bus_id = 0;
			     bus_id < tm->num_of_bus_per_interface;
			     bus_id++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, bus_id,
					      DDR_PHY_DATA, reg_addr,
					      &data_value));
				printf("0x%x ", data_value);
			}
			for (bus_id = 0;
			     bus_id < tm->num_of_bus_per_interface;
			     bus_id++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				CHECK_STATUS(ddr3_tip_bus_read
					     (dev_num, if_id,
					      ACCESS_TYPE_UNICAST, bus_id,
					      DDR_PHY_CONTROL, reg_addr,
					      &data_value));
				printf("0x%x ", data_value);
			}
		}
		printf("\n");
	}

	return MV_OK;
}

/*
 * Register access func registration
 */
int ddr3_tip_init_config_func(u32 dev_num,
			      struct hws_tip_config_func_db *config_func)
{
	if (config_func == NULL)
		return MV_BAD_PARAM;

	memcpy(&config_func_info[dev_num], config_func,
	       sizeof(struct hws_tip_config_func_db));

	return MV_OK;
}
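
/*
 * The callback table registered above is consulted by
 * ddr3_tip_get_device_info() and by the temperature read in
 * ddr3_tip_print_stability_log().
 */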

/*
 * Get training result info pointer
 */
enum hws_result *ddr3_tip_get_result_ptr(u32 stage)
{
	return training_result[stage];
}

/*
 * Device info read
 */
int ddr3_tip_get_device_info(u32 dev_num, struct ddr3_device_info *info_ptr)
{
	if (config_func_info[dev_num].tip_get_device_info_func != NULL) {
		return config_func_info[dev_num].
			tip_get_device_info_func((u8) dev_num, info_ptr);
	}

	return MV_FAIL;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Convert freq to character string
 */
static char *convert_freq(enum hws_ddr_freq freq)
{
	switch (freq) {
	case DDR_FREQ_LOW_FREQ:
		return "DDR_FREQ_LOW_FREQ";
	case DDR_FREQ_400:
		return "400";
	case DDR_FREQ_533:
		return "533";
	case DDR_FREQ_667:
		return "667";
	case DDR_FREQ_800:
		return "800";
	case DDR_FREQ_933:
		return "933";
	case DDR_FREQ_1066:
		return "1066";
	case DDR_FREQ_311:
		return "311";
	case DDR_FREQ_333:
		return "333";
	case DDR_FREQ_467:
		return "467";
	case DDR_FREQ_850:
		return "850";
	case DDR_FREQ_900:
		return "900";
	case DDR_FREQ_360:
		return "DDR_FREQ_360";
	case DDR_FREQ_1000:
		return "DDR_FREQ_1000";
	default:
		return "Unknown Frequency";
	}
}

/*
 * Convert device ID to character string
 */
static char *convert_dev_id(u32 dev_id)
{
	switch (dev_id) {
	case 0x6800:
		return "A38xx";
	case 0x6900:
		return "A39XX";
	case 0xf400:
		return "AC3";
	case 0xfc00:
		return "BC2";
	default:
		return "Unknown Device";
	}
}

/*
 * Convert memory size to character string
 */
static char *convert_mem_size(u32 dev_id)
{
	switch (dev_id) {
	case 0:
		return "512 MB";
	case 1:
		return "1 GB";
	case 2:
		return "2 GB";
	case 3:
		return "4 GB";
	case 4:
		return "8 GB";
	default:
		return "wrong mem size";
	}
}

int print_device_info(u8 dev_num)
{
	struct ddr3_device_info info_ptr;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	CHECK_STATUS(ddr3_tip_get_device_info(dev_num, &info_ptr));
	printf("=== DDR setup START===\n");
	printf("\tDevice ID: %s\n", convert_dev_id(info_ptr.device_id));
	printf("\tDDR3 CK delay: %d\n", info_ptr.ck_delay);
	print_topology(tm);
	printf("=== DDR setup END===\n");

	return MV_OK;
}

void hws_ddr3_tip_sweep_test(int enable)
{
	if (enable) {
		is_validate_window_per_if = 1;
		is_validate_window_per_pup = 1;
		debug_training = DEBUG_LEVEL_TRACE;
	} else {
		is_validate_window_per_if = 0;
		is_validate_window_per_pup = 0;
	}
}
#endif

char *ddr3_tip_convert_tune_result(enum hws_result tune_result)
{
	switch (tune_result) {
	case TEST_FAILED:
		return "FAILED";
	case TEST_SUCCESS:
		return "PASS";
	case NO_TEST_DONE:
		return "NOT COMPLETED";
	default:
		return "Un-KNOWN";
	}
}
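
/*
 * ddr3_tip_print_log() below reports, per active interface, the pass/fail
 * status of every training stage enabled in mask_tune_func. When the
 * validate-window flags are set (and EXCLUDE_SWITCH_DEBUG is not defined),
 * it first runs the RX/TX sweep tests and dumps the registers.
 */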

/*
 * Print log info
 */
int ddr3_tip_print_log(u32 dev_num, u32 mem_addr)
{
	u32 if_id = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifndef EXCLUDE_SWITCH_DEBUG
	if ((is_validate_window_per_if != 0) ||
	    (is_validate_window_per_pup != 0)) {
		u32 is_pup_log = 0;
		enum hws_ddr_freq freq;

		freq = tm->interface_params[first_active_if].memory_freq;
		is_pup_log = (is_validate_window_per_pup != 0) ? 1 : 0;
		printf("===VALIDATE WINDOW LOG START===\n");
		printf("DDR Frequency: %s ======\n", convert_freq(freq));
		/* print sweep windows */
		ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 1, is_pup_log);
		ddr3_tip_run_sweep_test(dev_num, sweep_cnt, 0, is_pup_log);
		ddr3_tip_print_all_pbs_result(dev_num);
		ddr3_tip_print_wl_supp_result(dev_num);
		printf("===VALIDATE WINDOW LOG END ===\n");
		CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));
		ddr3_tip_reg_dump(dev_num);
	}
#endif

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("IF %d Status:\n", if_id));

		if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tInit Controller: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[INIT_CONTROLLER]
					    [if_id])));
		}
		if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLow freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_LOW_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLoad Pattern: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[LOAD_PATTERN]
					    [if_id])));
		}
		if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tMedium freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_MEDIUM_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING]
					    [if_id])));
		}
		if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tLoad Pattern: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[LOAD_PATTERN_2]
					    [if_id])));
		}
		if (mask_tune_func & READ_LEVELING_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tRL: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[READ_LEVELING]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL Supp: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING_SUPP]
					    [if_id])));
		}
		if (mask_tune_func & PBS_RX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tPBS RX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[PBS_RX]
					    [if_id])));
		}
		if (mask_tune_func & PBS_TX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tPBS TX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[PBS_TX]
					    [if_id])));
		}
		if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tTarget freq Config: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[SET_TARGET_FREQ]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL TF: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[WRITE_LEVELING_TF]
					    [if_id])));
		}
		if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tRL TF: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[READ_LEVELING_TF]
					    [if_id])));
		}
		if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tWL TF Supp: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result
					    [WRITE_LEVELING_SUPP_TF]
					    [if_id])));
		}
		if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tCentr RX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[CENTRALIZATION_RX]
					    [if_id])));
		}
		if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tVREF_CALIBRATION: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[VREF_CALIBRATION]
					    [if_id])));
		}
		if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("\tCentr TX: %s\n",
					   ddr3_tip_convert_tune_result
					   (training_result[CENTRALIZATION_TX]
					    [if_id])));
		}
	}

	return MV_OK;
}

/*
 * Print stability log info
 */
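/*
 * The output is CSV-like: a "Title:" row per active interface naming the
 * columns, then one "Data:" row per interface with the temperature,
 * calibration values and, per chip-select and bus, the leveling,
 * centralization, Vref and PBS values read back from the PHY.
 */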
int ddr3_tip_print_stability_log(u32 dev_num)
{
	u8 if_id = 0, csindex = 0, bus_id = 0, idx = 0;
	u32 reg_data;
	u32 read_data[MAX_INTERFACE_NUM];
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* Title print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		printf("Title: I/F# , Tj, Calibration_n0, Calibration_p0, Calibration_n1, Calibration_p1, Calibration_n2, Calibration_p2,");
		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			printf("\n");
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			printf("VWTx, VWRx, WL_tot, WL_ADLL, WL_PH, RL_Tot, RL_ADLL, RL_PH, RL_Smp, Cen_tx, Cen_rx, Vref, DQVref,");
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSTx-Pad%d,", idx);
			printf("\t\t");
			for (idx = 0; idx < 11; idx++)
				printf("PBSRx-Pad%d,", idx);
		}
	}
	printf("\n");

	/* Data print */
	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		printf("Data: %d,%d,", if_id,
		       (config_func_info[dev_num].tip_get_temperature != NULL)
		       ? (config_func_info[dev_num].
			  tip_get_temperature(dev_num)) : (0));
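
		/*
		 * The next three reads provide the values printed under the
		 * Calibration_n/p columns of the title row: bits [9:4] and
		 * [15:10] of the first two registers, bits [21:16] and
		 * [27:22] of the third.
		 */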
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17c8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0) >> 4),
		       ((read_data[if_id] & 0xfc00) >> 10));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1dc8,
			      read_data, MASK_ALL_BITS));
		printf("%d,%d,", ((read_data[if_id] & 0x3f0000) >> 16),
		       ((read_data[if_id] & 0xfc00000) >> 22));

		for (csindex = 0; csindex < max_cs; csindex++) {
			printf("CS%d , ", csindex);
			for (bus_id = 0; bus_id < MAX_BUS_NUM; bus_id++) {
				printf("\n");
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  RESULT_DB_PHY_REG_ADDR +
						  csindex, &reg_data);
				printf("%d,%d,", (reg_data & 0x1f),
				       ((reg_data & 0x3e0) >> 5));
				/* WL */
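				/*
				 * WL_tot is reconstructed as ADLL[4:0] +
				 * 32 * phase[8:6]; the ADLL and phase fields
				 * are also printed separately (WL_ADLL,
				 * WL_PH).
				 */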
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST,
						  bus_id, DDR_PHY_DATA,
						  WL_PHY_REG +
						  csindex * 4, &reg_data);
				printf("%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32,
				       (reg_data & 0x1f),
				       (reg_data & 0x1c0) >> 6);
				/* RL */
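				/*
				 * RL_Tot combines the PHY ADLL[4:0] and
				 * phase[8:6] fields with the per-CS sample
				 * delay nibble from READ_DATA_SAMPLE_DELAY:
				 * ADLL + 32 * phase + 64 * sample.
				 */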
				CHECK_STATUS(ddr3_tip_if_read
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id,
					      READ_DATA_SAMPLE_DELAY,
					      read_data, MASK_ALL_BITS));
				read_data[if_id] =
					(read_data[if_id] &
					 (0xf << (4 * csindex))) >>
					(4 * csindex);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  RL_PHY_REG + csindex * 4,
						  &reg_data);
				printf("%d,%d,%d,%d,",
				       (reg_data & 0x1f) +
				       ((reg_data & 0x1c0) >> 6) * 32 +
				       read_data[if_id] * 64,
				       (reg_data & 0x1f),
				       ((reg_data & 0x1c0) >> 6),
				       read_data[if_id]);
				/* Centralization */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  WRITE_CENTRALIZATION_PHY_REG
						  + csindex * 4, &reg_data);
				printf("%d,", (reg_data & 0x3f));
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  READ_CENTRALIZATION_PHY_REG
						  + csindex * 4, &reg_data);
				printf("%d,", (reg_data & 0x1f));
				/* Vref */
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_id,
						  DDR_PHY_DATA,
						  PAD_CONFIG_PHY_REG,
						  &reg_data);
				printf("%d,", (reg_data & 0x7));
				/* DQVref */
				/* Need to add the Read Function from device */
				printf("%d,", 0);
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0xd0 +
							  12 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x10 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
				printf("\t\t");
				for (idx = 0; idx < 11; idx++) {
					ddr3_tip_bus_read(dev_num, if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_id, DDR_PHY_DATA,
							  0x50 +
							  16 * csindex +
							  idx, &reg_data);
					printf("%d,", (reg_data & 0x3f));
				}
			}
		}
	}
	printf("\n");

	return MV_OK;
}

/*
 * Register XSB information
 */
int ddr3_tip_register_xsb_info(u32 dev_num, struct hws_xsb_info *xsb_info_table)
{
	memcpy(&xsb_info[dev_num], xsb_info_table, sizeof(struct hws_xsb_info));
	return MV_OK;
}

/*
 * Read ADLL Value
 */
int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
		    int reg_addr, u32 mask)
{
	u32 data_value;
	u32 if_id = 0, bus_id = 0;
	u32 dev_num = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * multi CS support - reg_addr is calculated in the calling function
	 * with the CS offset
	 */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
		     bus_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
						       ACCESS_TYPE_UNICAST,
						       bus_id,
						       DDR_PHY_DATA, reg_addr,
						       &data_value));
			pup_values[if_id *
				   tm->num_of_bus_per_interface + bus_id] =
				data_value & mask;
		}
	}

	return 0;
}

/*
 * Write ADLL Value
 */
int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
		     int reg_addr)
{
	u32 if_id = 0, bus_id = 0;
	u32 dev_num = 0, data;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * multi CS support - reg_addr is calculated in the calling function
	 * with the CS offset
	 */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
		     bus_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			data = pup_values[if_id *
					  tm->num_of_bus_per_interface +
					  bus_id];
			CHECK_STATUS(ddr3_tip_bus_write(dev_num,
							ACCESS_TYPE_UNICAST,
							if_id,
							ACCESS_TYPE_UNICAST,
							bus_id, DDR_PHY_DATA,
							reg_addr, data));
		}
	}

	return 0;
}

#ifndef EXCLUDE_SWITCH_DEBUG
u32 rl_version = 1; /* 0 - old RL machine */
struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
u32 start_xsb_offset = 0;
u8 is_rl_old = 0;
u8 is_freq_old = 0;
u8 is_dfs_disabled = 0;
u32 default_centrlization_value = 0x12;
u32 vref = 0x4;
u32 activate_select_before_run_alg = 1, activate_deselect_after_run_alg = 1,
	rl_test = 0, reset_read_fifo = 0;
int debug_acc = 0;
u32 ctrl_sweepres[ADLL_LENGTH][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 ctrl_adll[MAX_CS_NUM * MAX_INTERFACE_NUM * MAX_BUS_NUM];
u8 cs_mask_reg[] = {
	0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
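
/*
 * Data patterns used by run_xsb_test(): each row holds eight 32-bit words
 * forming one test burst that is written to memory and read back for
 * comparison.
 */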
u32 xsb_test_table[][8] = {
	{0x00000000, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555,
	 0x66666666, 0x77777777},
	{0x88888888, 0x99999999, 0xaaaaaaaa, 0xbbbbbbbb, 0xcccccccc, 0xdddddddd,
	 0xeeeeeeee, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
	 0x00000000, 0xffffffff},
	{0x00000000, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
	 0xffffffff, 0xffffffff},
	{0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000,
	 0x00000000, 0x00000000},
	{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
	 0xffffffff, 0xffffffff}
};

static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr);

int ddr3_tip_print_adll(void)
{
	u32 bus_cnt = 0, if_id, data_p1, data_p2, ui_data3, dev_num = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_cnt++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id,
				      ACCESS_TYPE_UNICAST, bus_cnt,
				      DDR_PHY_DATA, 0x1, &data_p1));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, 0x2, &data_p2));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, 0x3, &ui_data3));
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  (" IF %d bus_cnt %d phy_reg_1_data 0x%x phy_reg_2_data 0x%x phy_reg_3_data 0x%x\n",
					   if_id, bus_cnt, data_p1, data_p2,
					   ui_data3));
		}
	}

	return MV_OK;
}
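
/*
 * Attribute access: ddr3_tip_set_atr() uses ddr3_tip_access_atr() to map a
 * numeric flag ID to one of the tuning globals, a topology field (IDs
 * 0x200-0x3ef) or a clamp_tbl entry (IDs 0x500-0x50f), and then writes the
 * requested value. IDs 0x101-0x109 are written directly inside
 * ddr3_tip_access_atr() and no pointer is returned for them.
 */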

/*
 * Set attribute value
 */
int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value)
{
	int ret;
	u32 *ptr_flag = NULL;

	ret = ddr3_tip_access_atr(dev_num, flag_id, value, &ptr_flag);
	if (ptr_flag != NULL) {
		printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x (was 0x%x)\n",
		       flag_id, value, *ptr_flag);
		*ptr_flag = value;
	} else {
		printf("ddr3_tip_set_atr Flag ID 0x%x value is set to 0x%x\n",
		       flag_id, value);
	}

	return ret;
}

/*
 * Access attribute
 */
static int ddr3_tip_access_atr(u32 dev_num, u32 flag_id, u32 value, u32 **ptr)
{
	u32 tmp_val = 0, if_id = 0, pup_id = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	*ptr = NULL;

	switch (flag_id) {
	case 0:
		*ptr = (u32 *)&(tm->if_act_mask);
		break;
	case 0x1:
		*ptr = (u32 *)&mask_tune_func;
		break;
	case 0x2:
		*ptr = (u32 *)&low_freq;
		break;
	case 0x3:
		*ptr = (u32 *)&medium_freq;
		break;
	case 0x4:
		*ptr = (u32 *)&generic_init_controller;
		break;
	case 0x5:
		*ptr = (u32 *)&rl_version;
		break;
	case 0x8:
		*ptr = (u32 *)&start_xsb_offset;
		break;
	case 0x20:
		*ptr = (u32 *)&is_rl_old;
		break;
	case 0x21:
		*ptr = (u32 *)&is_freq_old;
		break;
	case 0x23:
		*ptr = (u32 *)&is_dfs_disabled;
		break;
	case 0x24:
		*ptr = (u32 *)&is_pll_before_init;
		break;
	case 0x25:
		*ptr = (u32 *)&is_adll_calib_before_init;
		break;
#ifdef STATIC_ALGO_SUPPORT
	case 0x26:
		*ptr = (u32 *)&(silicon_delay[0]);
		break;
	case 0x27:
		*ptr = (u32 *)&wl_debug_delay;
		break;
#endif
	case 0x28:
		*ptr = (u32 *)&is_tune_result;
		break;
	case 0x29:
		*ptr = (u32 *)&is_validate_window_per_if;
		break;
	case 0x2a:
		*ptr = (u32 *)&is_validate_window_per_pup;
		break;
	case 0x30:
		*ptr = (u32 *)&sweep_cnt;
		break;
	case 0x31:
		*ptr = (u32 *)&is_bist_reset_bit;
		break;
	case 0x32:
		*ptr = (u32 *)&is_dfs_in_init;
		break;
	case 0x33:
		*ptr = (u32 *)&p_finger;
		break;
	case 0x34:
		*ptr = (u32 *)&n_finger;
		break;
	case 0x35:
		*ptr = (u32 *)&init_freq;
		break;
	case 0x36:
		*ptr = (u32 *)&(freq_val[DDR_FREQ_LOW_FREQ]);
		break;
	case 0x37:
		*ptr = (u32 *)&start_pattern;
		break;
	case 0x38:
		*ptr = (u32 *)&end_pattern;
		break;
	case 0x39:
		*ptr = (u32 *)&phy_reg0_val;
		break;
	case 0x4a:
		*ptr = (u32 *)&phy_reg1_val;
		break;
	case 0x4b:
		*ptr = (u32 *)&phy_reg2_val;
		break;
	case 0x4c:
		*ptr = (u32 *)&phy_reg3_val;
		break;
	case 0x4e:
		*ptr = (u32 *)&sweep_pattern;
		break;
	case 0x50:
		*ptr = (u32 *)&is_rzq6;
		break;
	case 0x51:
		*ptr = (u32 *)&znri_data_phy_val;
		break;
	case 0x52:
		*ptr = (u32 *)&zpri_data_phy_val;
		break;
	case 0x53:
		*ptr = (u32 *)&finger_test;
		break;
	case 0x54:
		*ptr = (u32 *)&n_finger_start;
		break;
	case 0x55:
		*ptr = (u32 *)&n_finger_end;
		break;
	case 0x56:
		*ptr = (u32 *)&p_finger_start;
		break;
	case 0x57:
		*ptr = (u32 *)&p_finger_end;
		break;
	case 0x58:
		*ptr = (u32 *)&p_finger_step;
		break;
	case 0x59:
		*ptr = (u32 *)&n_finger_step;
		break;
	case 0x5a:
		*ptr = (u32 *)&znri_ctrl_phy_val;
		break;
	case 0x5b:
		*ptr = (u32 *)&zpri_ctrl_phy_val;
		break;
	case 0x5c:
		*ptr = (u32 *)&is_reg_dump;
		break;
	case 0x5d:
		*ptr = (u32 *)&vref;
		break;
	case 0x5e:
		*ptr = (u32 *)&mode2_t;
		break;
	case 0x5f:
		*ptr = (u32 *)&xsb_validate_type;
		break;
	case 0x60:
		*ptr = (u32 *)&xsb_validation_base_address;
		break;
	case 0x67:
		*ptr = (u32 *)&activate_select_before_run_alg;
		break;
	case 0x68:
		*ptr = (u32 *)&activate_deselect_after_run_alg;
		break;
	case 0x69:
		*ptr = (u32 *)&odt_additional;
		break;
	case 0x70:
		*ptr = (u32 *)&debug_mode;
		break;
	case 0x71:
		*ptr = (u32 *)&pbs_pattern;
		break;
	case 0x72:
		*ptr = (u32 *)&delay_enable;
		break;
	case 0x73:
		*ptr = (u32 *)&ck_delay;
		break;
	case 0x74:
		*ptr = (u32 *)&ck_delay_16;
		break;
	case 0x75:
		*ptr = (u32 *)&ca_delay;
		break;
	case 0x100:
		*ptr = (u32 *)&debug_dunit;
		break;
	case 0x101:
		debug_acc = (int)value;
		break;
	case 0x102:
		debug_training = (u8)value;
		break;
	case 0x103:
		debug_training_bist = (u8)value;
		break;
	case 0x104:
		debug_centralization = (u8)value;
		break;
	case 0x105:
		debug_training_ip = (u8)value;
		break;
	case 0x106:
		debug_leveling = (u8)value;
		break;
	case 0x107:
		debug_pbs = (u8)value;
		break;
	case 0x108:
		debug_training_static = (u8)value;
		break;
	case 0x109:
		debug_training_access = (u8)value;
		break;
	case 0x112:
		*ptr = &start_pattern;
		break;
	case 0x113:
		*ptr = &end_pattern;
		break;
	default:
		if ((flag_id >= 0x200) && (flag_id < 0x210)) {
			if_id = flag_id - 0x200;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].memory_freq);
		} else if ((flag_id >= 0x210) && (flag_id < 0x220)) {
			if_id = flag_id - 0x210;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].speed_bin_index);
		} else if ((flag_id >= 0x220) && (flag_id < 0x230)) {
			if_id = flag_id - 0x220;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].bus_width);
		} else if ((flag_id >= 0x230) && (flag_id < 0x240)) {
			if_id = flag_id - 0x230;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].memory_size);
		} else if ((flag_id >= 0x240) && (flag_id < 0x250)) {
			if_id = flag_id - 0x240;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].cas_l);
		} else if ((flag_id >= 0x250) && (flag_id < 0x260)) {
			if_id = flag_id - 0x250;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].cas_wl);
		} else if ((flag_id >= 0x270) && (flag_id < 0x2cf)) {
			if_id = (flag_id - 0x270) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x270) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].is_ck_swap);
		} else if ((flag_id >= 0x2d0) && (flag_id < 0x32f)) {
			if_id = (flag_id - 0x2d0) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x2d0) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].is_dqs_swap);
		} else if ((flag_id >= 0x330) && (flag_id < 0x38f)) {
			if_id = (flag_id - 0x330) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x330) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params[if_id].
					as_bus_params[pup_id].cs_bitmask);
		} else if ((flag_id >= 0x390) && (flag_id < 0x3ef)) {
			if_id = (flag_id - 0x390) / MAX_BUS_NUM;
			pup_id = (flag_id - 0x390) % MAX_BUS_NUM;
			*ptr = (u32 *)&(tm->interface_params
					[if_id].as_bus_params
					[pup_id].mirror_enable_bitmask);
		} else if ((flag_id >= 0x500) && (flag_id <= 0x50f)) {
			tmp_val = flag_id - 0x320;
			*ptr = (u32 *)&(clamp_tbl[tmp_val]);
		} else {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("flag_id out of boundary %d\n",
					   flag_id));
			return MV_BAD_PARAM;
		}
	}

	return MV_OK;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Print ADLL
 */
int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM])
{
	u32 i, j;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (j = 0; j < tm->num_of_bus_per_interface; j++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, j);
		for (i = 0; i < MAX_INTERFACE_NUM; i++) {
			printf("%d ,",
			       adll[i * tm->num_of_bus_per_interface + j]);
		}
	}
	printf("\n");

	return MV_OK;
}
#endif

/* byte_index - only byte 0, 1, 2, or 3; 0xff - test all bytes */
static u32 ddr3_tip_compare(u32 if_id, u32 *p_src, u32 *p_dst,
			    u32 byte_index)
{
	u32 burst_cnt = 0, addr_offset, i_id;
	int b_is_fail = 0;

	addr_offset =
		(byte_index ==
		 0xff) ? (u32) 0xffffffff : (u32) (0xff << (byte_index * 8));
	for (burst_cnt = 0; burst_cnt < EXT_ACCESS_BURST_LENGTH; burst_cnt++) {
		if ((p_src[burst_cnt] & addr_offset) !=
		    (p_dst[burst_cnt] & addr_offset))
			b_is_fail = 1;
	}

	if (b_is_fail == 1) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("IF %d exp: ", if_id));
		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("0x%8x ", p_src[i_id]));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("\n_i_f %d rcv: ", if_id));
		for (i_id = 0; i_id <= MAX_INTERFACE_NUM - 1; i_id++) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("(0x%8x ", p_dst[i_id]));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("\n "));
	}

	return b_is_fail;
}
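
/*
 * ddr3_tip_sweep_test() below saves the current ADLL setting of each PHY,
 * optionally sweeps the ADLL register (register 1 for TX, register 3 for RX)
 * over its full range, and then restores the saved value.
 */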

/* test_type = 0-tx , 1-rx */
int ddr3_tip_sweep_test(u32 dev_num, u32 test_type,
			u32 mem_addr, u32 is_modify_adll,
			u32 start_if, u32 end_if, u32 startpup, u32 endpup)
{
	u32 bus_cnt = 0, adll_val = 0, if_id, ui_prev_adll, ui_mask_bit,
		end_adll, start_adll;
	u32 reg_addr = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (test_type == 0) {
		reg_addr = 1;
		ui_mask_bit = 0x3f;
		start_adll = 0;
		end_adll = ui_mask_bit;
	} else {
		reg_addr = 3;
		ui_mask_bit = 0x1f;
		start_adll = 0;
		end_adll = ui_mask_bit;
	}

	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("==============================\n"));
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("Test type %d (0-tx, 1-rx)\n", test_type));

	for (if_id = start_if; if_id <= end_if; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_cnt = startpup; bus_cnt < endpup; bus_cnt++) {
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_cnt, DDR_PHY_DATA, reg_addr,
				      &ui_prev_adll));

			for (adll_val = start_adll; adll_val <= end_adll;
			     adll_val++) {
				if (is_modify_adll == 1) {
					CHECK_STATUS(ddr3_tip_bus_read_modify_write
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id, bus_cnt,
						      DDR_PHY_DATA, reg_addr,
						      adll_val, ui_mask_bit));
				}
			}
			if (is_modify_adll == 1) {
				CHECK_STATUS(ddr3_tip_bus_write
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id, ACCESS_TYPE_UNICAST,
					      bus_cnt, DDR_PHY_DATA, reg_addr,
					      ui_prev_adll));
			}
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("\n"));
	}

	return MV_OK;
}

#ifndef EXCLUDE_SWITCH_DEBUG
/*
 * Sweep validation
 */
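/*
 * For every chip-select, the ADLL of the selected centralization register
 * (TX or RX, per 'direction') is swept across ADLL_LENGTH steps; a BIST run
 * with sweep_pattern is executed at each step and the per-interface results
 * are collected in ctrl_sweepres[]. The original ADLL values are saved
 * beforehand, restored afterwards, and the result table is printed as CSV.
 */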
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
			    u32 mode)
{
	u32 pup = 0, start_pup = 0, end_pup = 0;
	u32 adll = 0;
	u32 res[MAX_INTERFACE_NUM] = { 0 };
	int if_id = 0;
	u32 adll_value = 0;
	int reg = (direction == 0) ? WRITE_CENTRALIZATION_PHY_REG :
		READ_CENTRALIZATION_PHY_REG;
	enum hws_access_type pup_access;
	u32 cs;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (mode == 1) {
		/* per pup */
		start_pup = 0;
		end_pup = tm->num_of_bus_per_interface - 1;
		pup_access = ACCESS_TYPE_UNICAST;
	} else {
		start_pup = 0;
		end_pup = 0;
		pup_access = ACCESS_TYPE_MULTICAST;
	}

	for (cs = 0; cs < max_cs; cs++) {
		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					ctrl_sweepres[adll][if_id][pup] = 0;
				}
			}
		}

		for (adll = 0; adll < (MAX_INTERFACE_NUM * MAX_BUS_NUM); adll++)
			ctrl_adll[adll] = 0;
		/* Save DQS value (after the algorithm run) */
		read_adll_value(ctrl_adll,
				(reg + (cs * CS_REGISTER_ADDR_OFFSET)),
				MASK_ALL_BITS);

		/*
		 * Sweep ADLL from 0:31 on all I/F on all Pup and perform
		 * BIST on each stage.
		 */
		for (pup = start_pup; pup <= end_pup; pup++) {
			for (adll = 0; adll < ADLL_LENGTH; adll++) {
				adll_value =
					(direction == 0) ? (adll * 2) : adll;
				CHECK_STATUS(ddr3_tip_bus_write
					     (dev_num, ACCESS_TYPE_MULTICAST, 0,
					      pup_access, pup, DDR_PHY_DATA,
					      reg + CS_REG_VALUE(cs),
					      adll_value));
				hws_ddr3_run_bist(dev_num, sweep_pattern, res,
						  cs);
				/* ddr3_tip_reset_fifo_ptr(dev_num); */
				for (if_id = 0;
				     if_id <= MAX_INTERFACE_NUM - 1;
				     if_id++) {
					VALIDATE_ACTIVE(tm->if_act_mask, if_id);
					ctrl_sweepres[adll][if_id][pup]
						= res[if_id];
					if (mode == 1) {
						CHECK_STATUS
							(ddr3_tip_bus_write
							 (dev_num,
							  ACCESS_TYPE_UNICAST,
							  if_id,
							  ACCESS_TYPE_UNICAST,
							  pup,
							  DDR_PHY_DATA,
							  reg + CS_REG_VALUE(cs),
							  ctrl_adll[if_id *
								    cs *
								    tm->num_of_bus_per_interface
								    + pup]));
					}
				}
			}
		}
		printf("Final, CS %d,%s, Sweep, Result, Adll,", cs,
		       ((direction == 0) ? "TX" : "RX"));
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			if (mode == 1) {
				for (pup = start_pup; pup <= end_pup; pup++) {
					VALIDATE_ACTIVE(tm->bus_act_mask, pup);
					printf("I/F%d-PHY%d , ", if_id, pup);
				}
			} else {
				printf("I/F%d , ", if_id);
			}
		}
		printf("\n");

		for (adll = 0; adll < ADLL_LENGTH; adll++) {
			adll_value = (direction == 0) ? (adll * 2) : adll;
			printf("Final,%s, Sweep, Result, %d ,",
			       ((direction == 0) ? "TX" : "RX"), adll_value);

			for (if_id = 0;
			     if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				for (pup = start_pup; pup <= end_pup; pup++) {
					printf("%d , ",
					       ctrl_sweepres[adll][if_id]
					       [pup]);
				}
			}
			printf("\n");
		}

		/*
		 * Write back to the PHY the Rx DQS value that we stored at
		 * the beginning.
		 */
		write_adll_value(ctrl_adll,
				 (reg + cs * CS_REGISTER_ADDR_OFFSET));
		/* print adll results */
		read_adll_value(ctrl_adll, (reg + cs * CS_REGISTER_ADDR_OFFSET),
				MASK_ALL_BITS);
		printf("%s, DQS, ADLL,,,", (direction == 0) ? "Tx" : "Rx");
		print_adll(dev_num, ctrl_adll);
	}
	ddr3_tip_reset_fifo_ptr(dev_num);

	return 0;
}

void print_topology(struct hws_topology_map *topology_db)
{
	u32 ui, uj;

	printf("\tinterface_mask: 0x%x\n", topology_db->if_act_mask);
	printf("\tNum Bus: %d\n", topology_db->num_of_bus_per_interface);
	printf("\tbus_act_mask: 0x%x\n", topology_db->bus_act_mask);

	for (ui = 0; ui < MAX_INTERFACE_NUM; ui++) {
		VALIDATE_ACTIVE(topology_db->if_act_mask, ui);
		printf("\n\tInterface ID: %d\n", ui);
		printf("\t\tDDR Frequency: %s\n",
		       convert_freq(topology_db->
				    interface_params[ui].memory_freq));
		printf("\t\tSpeed_bin: %d\n",
		       topology_db->interface_params[ui].speed_bin_index);
		printf("\t\tBus_width: %d\n",
		       (4 << topology_db->interface_params[ui].bus_width));
		printf("\t\tMem_size: %s\n",
		       convert_mem_size(topology_db->
					interface_params[ui].memory_size));
		printf("\t\tCAS-WL: %d\n",
		       topology_db->interface_params[ui].cas_wl);
		printf("\t\tCAS-L: %d\n",
		       topology_db->interface_params[ui].cas_l);
		printf("\t\tTemperature: %d\n",
		       topology_db->interface_params[ui].interface_temp);
		printf("\n");
		for (uj = 0; uj < 4; uj++) {
			printf("\t\tBus %d parameters- CS Mask: 0x%x\t", uj,
			       topology_db->interface_params[ui].
			       as_bus_params[uj].cs_bitmask);
			printf("Mirror: 0x%x\t",
			       topology_db->interface_params[ui].
			       as_bus_params[uj].mirror_enable_bitmask);
			printf("DQS Swap is %s \t",
			       (topology_db->
				interface_params[ui].as_bus_params[uj].
				is_dqs_swap == 1) ? "enabled" : "disabled");
			printf("Ck Swap:%s\t",
			       (topology_db->
				interface_params[ui].as_bus_params[uj].
				is_ck_swap == 1) ? "enabled" : "disabled");
			printf("\n");
		}
	}
}
#endif

/*
 * Execute XSB Test transaction (rd/wr/both)
 */
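/*
 * write_type != 0 writes the next pattern from xsb_test_table[] to memory,
 * read_type != 0 reads it back, and when both are set the read data is
 * compared against the written pattern with ddr3_tip_compare(); the first
 * non-OK status encountered is kept and returned.
 */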
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
		 u32 read_type, u32 burst_length)
{
	u32 seq = 0, if_id = 0, addr, cnt;
	int ret = MV_OK, ret_tmp;
	u32 data_read[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		addr = mem_addr;
		for (cnt = 0; cnt <= burst_length; cnt++) {
			seq = (seq + 1) % 8;
			if (write_type != 0) {
				CHECK_STATUS(ddr3_tip_ext_write
					     (dev_num, if_id, addr, 1,
					      xsb_test_table[seq]));
			}
			if (read_type != 0) {
				CHECK_STATUS(ddr3_tip_ext_read
					     (dev_num, if_id, addr, 1,
					      data_read));
			}
			if ((read_type != 0) && (write_type != 0)) {
				ret_tmp =
					ddr3_tip_compare(if_id,
							 xsb_test_table[seq],
							 data_read,
							 0xff);
				addr += (EXT_ACCESS_BURST_LENGTH * 4);
				ret = (ret != MV_OK) ? ret : ret_tmp;
			}
		}
	}

	return ret;
}

#else /* EXCLUDE_SWITCH_DEBUG */
u32 rl_version = 1; /* 0 - old RL machine */
u32 vref = 0x4;
u32 start_xsb_offset = 0;
u8 cs_mask_reg[] = {
	0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type,
		 u32 read_type, u32 burst_length)
{
	return MV_OK;
}
#endif