/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

#define WL_ITERATION_NUM	10
#define ONE_CLOCK_ERROR_SHIFT	2
#define ALIGN_ERROR_SHIFT	-2

static u32 pup_mask_table[] = {
	0x000000ff,
	0x0000ff00,
	0x00ff0000,
	0xff000000
};

static struct write_supp_result wr_supp_res[MAX_INTERFACE_NUM][MAX_BUS_NUM];

static int ddr3_tip_dynamic_write_leveling_seq(u32 dev_num);
static int ddr3_tip_dynamic_read_leveling_seq(u32 dev_num);
static int ddr3_tip_dynamic_per_bit_read_leveling_seq(u32 dev_num);
static int ddr3_tip_wl_supp_align_err_shift(u32 dev_num, u32 if_id, u32 bus_id,
					    u32 bus_id_delta);
static int ddr3_tip_wl_supp_align_phase_shift(u32 dev_num, u32 if_id,
					      u32 bus_id, u32 offset,
					      u32 bus_id_delta);
static int ddr3_tip_xsb_compare_test(u32 dev_num, u32 if_id, u32 bus_id,
				     u32 edge_offset, u32 bus_id_delta);
static int ddr3_tip_wl_supp_one_clk_err_shift(u32 dev_num, u32 if_id,
					      u32 bus_id, u32 bus_id_delta);

u32 hws_ddr3_tip_max_cs_get(void)
{
	u32 c_cs;
	static u32 max_cs;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (!max_cs) {
		for (c_cs = 0; c_cs < NUM_OF_CS; c_cs++) {
			VALIDATE_ACTIVE(tm->
					interface_params[0].as_bus_params[0].
					cs_bitmask, c_cs);
			max_cs++;
		}
	}

	return max_cs;
}
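/*
 * Note: max_cs is computed once by counting the chip-select bits set for
 * bus 0 of interface 0, assuming VALIDATE_ACTIVE() skips (continues past)
 * inactive entries so that only active chip-selects are counted.
 */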
/*****************************************************************************
Dynamic read leveling
******************************************************************************/
int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 freq)
{
	u32 data, mask;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	u32 bus_num, if_id, cl_val;
	enum hws_speed_bin speed_bin_index;
	/* save current CS value */
	u32 cs_enable_reg_val[MAX_INTERFACE_NUM] = { 0 };
	int is_any_pup_fail = 0;
	u32 data_read[MAX_INTERFACE_NUM + 1] = { 0 };
	u8 rl_values[NUM_OF_CS][MAX_BUS_NUM][MAX_INTERFACE_NUM];
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (rl_version == 0) {
		/* OLD RL machine */
		data = 0x40;
		data |= (1 << 20);

		/* TBD multi CS */
		CHECK_STATUS(ddr3_tip_if_write(
				     dev_num, ACCESS_TYPE_MULTICAST,
				     PARAM_NOT_CARE, TRAINING_REG,
				     data, 0x11ffff));
		CHECK_STATUS(ddr3_tip_if_write(
				     dev_num, ACCESS_TYPE_MULTICAST,
				     PARAM_NOT_CARE,
				     TRAINING_PATTERN_BASE_ADDRESS_REG,
				     0, 0xfffffff8));
		CHECK_STATUS(ddr3_tip_if_write(
				     dev_num, ACCESS_TYPE_MULTICAST,
				     PARAM_NOT_CARE, TRAINING_REG,
				     (u32)(1 << 31), (u32)(1 << 31)));

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			training_result[training_stage][if_id] = TEST_SUCCESS;
			if (ddr3_tip_if_polling
			    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
			     (u32)(1 << 31), TRAINING_REG,
			     MAX_POLLING_ITERATIONS) != MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_ERROR,
					("RL: DDR3 poll failed(1) IF %d\n",
					 if_id));
				training_result[training_stage][if_id] =
					TEST_FAILED;

				if (debug_mode == 0)
					return MV_FAIL;
			}
		}

		/* read read-leveling result */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, data_read, 1 << 30));
		/* exit read leveling mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x8, 0x9));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_1_REG, 1 << 16, 1 << 16));

		/* disable RL machine all Trn_CS[3:0] , [16:0] */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, 0, 0xf1ffff));

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			if ((data_read[if_id] & (1 << 30)) == 0) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_ERROR,
					("\n_read Leveling failed for IF %d\n",
					 if_id));
				training_result[training_stage][if_id] =
					TEST_FAILED;
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
		return MV_OK;
	}
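	/*
	 * The legacy path above delegates the whole search to the hardware
	 * RL state machine; the flow below instead runs the ODPG-based
	 * training once per chip-select and reads the per-pup results back
	 * from the training IP registers.
	 */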
	/* NEW RL machine */
	for (effective_cs = 0; effective_cs < NUM_OF_CS; effective_cs++)
		for (bus_num = 0; bus_num < MAX_BUS_NUM; bus_num++)
			for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++)
				rl_values[effective_cs][bus_num][if_id] = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			training_result[training_stage][if_id] = TEST_SUCCESS;

			/* save current cs enable reg val */
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      CS_ENABLE_REG, cs_enable_reg_val,
				      MASK_ALL_BITS));
			/* enable single cs */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      CS_ENABLE_REG, (1 << 3), (1 << 3)));
		}

		ddr3_tip_reset_fifo_ptr(dev_num);

		/*
		 * Phase 1: Load pattern (using ODPG)
		 *
		 * enter Read Leveling mode
		 * only 27 bits are masked
		 * assuming non multi-CS configuration
		 * write to CS = 0 for the non multi CS configuration, note
		 * that the results shall be read back to the required CS !!!
		 */

		/* BUS count is 0 shifted 26 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0x3, 0x3));
		CHECK_STATUS(ddr3_tip_configure_odpg
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0,
			      pattern_table[PATTERN_RL].num_of_phases_tx, 0,
			      pattern_table[PATTERN_RL].num_of_phases_rx, 0, 0,
			      effective_cs, STRESS_NONE, DURATION_SINGLE));

		/* load pattern to ODPG */
		ddr3_tip_load_pattern_to_odpg(dev_num, ACCESS_TYPE_MULTICAST,
					      PARAM_NOT_CARE, PATTERN_RL,
					      pattern_table[PATTERN_RL].
					      start_addr);

		/*
		 * Phase 2: ODPG to Read Leveling mode
		 */

		/* General Training Opcode register */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_WRITE_READ_MODE_ENABLE_REG, 0,
			      MASK_ALL_BITS));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_CONTROL_REG,
			      (0x301b01 | effective_cs << 2), 0x3c3fef));

		/* Object1 opcode register 0 & 1 */
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			speed_bin_index =
				tm->interface_params[if_id].speed_bin_index;
			cl_val =
				cas_latency_table[speed_bin_index].cl_val[freq];
			data = (cl_val << 17) | (0x3 << 25);
			mask = (0xff << 9) | (0x1f << 17) | (0x3 << 25);
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ODPG_OBJ1_OPCODE_REG, data, mask));
		}

		/* Set iteration count to max value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_OPCODE_1_REG, 0xd00, 0xd00));

		/*
		 * Phase 2: Mask config
		 */
		ddr3_tip_dynamic_read_leveling_seq(dev_num);

		/*
		 * Phase 3: Read Leveling execution
		 */

		/* temporary jira dunit=14751 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_DBG_1_REG, 0, (u32)(1 << 31)));
		/* configure phy reset value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_DBG_3_REG, (0x7f << 24),
			      (u32)(0xff << 24)));
		/* data pup rd reset enable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      SDRAM_CONFIGURATION_REG, 0, (1 << 30)));
		/* data pup rd reset disable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      SDRAM_CONFIGURATION_REG, (1 << 30), (1 << 30)));
		/* training SW override & training RL mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x1, 0x9));
		/* training enable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, (1 << 24) | (1 << 20),
			      (1 << 24) | (1 << 20)));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, (u32)(1 << 31), (u32)(1 << 31)));

		/********* trigger training *******************/
		/* Trigger, poll on status and disable ODPG */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_TRIGGER_REG, 0x1, 0x1));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_STATUS_REG, 0x1, 0x1));

		/* check for training done + results pass */
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0x2, 0x2,
		     ODPG_TRAINING_STATUS_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("Training Done Failed\n"));
			return MV_FAIL;
		}

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id,
				      ODPG_TRAINING_TRIGGER_REG, data_read,
				      0x4));
			data = data_read[if_id];
			if (data != 0x0) {
				DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
					       ("Training Result Failed\n"));
			}
		}

		/* disable ODPG - back to functional mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_ENABLE_REG, 0x1 << ODPG_DISABLE_OFFS,
			      (0x1 << ODPG_DISABLE_OFFS)));
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0x0, 0x1,
		     ODPG_ENABLE_REG, MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("ODPG disable failed "));
			return MV_FAIL;
		}
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0, MASK_ALL_BITS));

		/* double loop on bus, pup */
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			/* check training done */
			is_any_pup_fail = 0;
			for (bus_num = 0;
			     bus_num < tm->num_of_bus_per_interface;
			     bus_num++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);
				if (ddr3_tip_if_polling
				    (dev_num, ACCESS_TYPE_UNICAST,
				     if_id, (1 << 25), (1 << 25),
				     mask_results_pup_reg_map[bus_num],
				     MAX_POLLING_ITERATIONS) != MV_OK) {
					DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
						       ("\n_r_l: DDR3 poll failed(2) for bus %d",
							bus_num));
					is_any_pup_fail = 1;
				} else {
					/* read result per pup */
					CHECK_STATUS(ddr3_tip_if_read
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id,
						      mask_results_pup_reg_map
						      [bus_num], data_read,
						      0xff));
					rl_values[effective_cs][bus_num]
						[if_id] = (u8)data_read[if_id];
				}
			}

			if (is_any_pup_fail == 1) {
				training_result[training_stage][if_id] =
					TEST_FAILED;
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}

		DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("RL exit read leveling\n"));

		/*
		 * Phase 3: Exit Read Leveling
		 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, (1 << 3), (1 << 3)));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_1_REG, (1 << 16), (1 << 16)));
		/* set ODPG to functional */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0x0, MASK_ALL_BITS));

		/*
		 * Copy the result from the effective CS search to the
		 * real Functional CS
		 */
		/* ddr3_tip_write_cs_result(dev_num, RL_PHY_REG); */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0x0, MASK_ALL_BITS));
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		/* double loop on bus, pup */
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			for (bus_num = 0;
			     bus_num < tm->num_of_bus_per_interface;
			     bus_num++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);
				/* read result per pup from array */
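				/*
				 * The raw result byte appears to pack the
				 * ADLL value in bits [4:0] and the phase in
				 * bits [7:5]; the RL PHY register keeps the
				 * ADLL in [4:0] but takes the phase in
				 * [8:6], hence the repack below.
				 */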
				data = rl_values[effective_cs][bus_num][if_id];
				data = (data & 0x1f) |
					(((data & 0xe0) >> 5) << 6);
				ddr3_tip_bus_write(dev_num,
						   ACCESS_TYPE_UNICAST,
						   if_id,
						   ACCESS_TYPE_UNICAST,
						   bus_num, DDR_PHY_DATA,
						   RL_PHY_REG +
						   ((effective_cs ==
						     0) ? 0x0 : 0x4), data);
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/* restore cs enable value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, cs_enable_reg_val[if_id],
			      MASK_ALL_BITS));
		if (odt_config != 0) {
			CHECK_STATUS(ddr3_tip_write_additional_odt_setting
				     (dev_num, if_id));
		}
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (training_result[training_stage][if_id] == TEST_FAILED)
			return MV_FAIL;
	}

	return MV_OK;
}

/*
 * Legacy Dynamic write leveling
 */
int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num)
{
	u32 c_cs, if_id, cs_mask = 0;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * In TRAINING reg (0x15b0) write 0x80000008 | cs_mask:
	 * Trn_start
	 * cs_mask = 0x1 << 20 Trn_CS0 - CS0 is included in the DDR3 training
	 * cs_mask = 0x1 << 21 Trn_CS1 - CS1 is included in the DDR3 training
	 * cs_mask = 0x1 << 22 Trn_CS2 - CS2 is included in the DDR3 training
	 * cs_mask = 0x1 << 23 Trn_CS3 - CS3 is included in the DDR3 training
	 * Trn_auto_seq = write leveling
	 */
	for (c_cs = 0; c_cs < max_cs; c_cs++)
		cs_mask = cs_mask | 1 << (20 + c_cs);

	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, 0,
			      TRAINING_REG, (0x80000008 | cs_mask),
			      0xffffffff));
		mdelay(20);
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
		     (u32)0x80000000, TRAINING_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("polling failed for Old WL result\n"));
			return MV_FAIL;
		}
	}

	return MV_OK;
}
/*
 * Legacy Dynamic read leveling
 */
int ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num)
{
	u32 c_cs, if_id, cs_mask = 0;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * In TRAINING reg (0x15b0) write 0x80000040 | cs_mask:
	 * Trn_start
	 * cs_mask = 0x1 << 20 Trn_CS0 - CS0 is included in the DDR3 training
	 * cs_mask = 0x1 << 21 Trn_CS1 - CS1 is included in the DDR3 training
	 * cs_mask = 0x1 << 22 Trn_CS2 - CS2 is included in the DDR3 training
	 * cs_mask = 0x1 << 23 Trn_CS3 - CS3 is included in the DDR3 training
	 * Trn_auto_seq = Read Leveling using training pattern
	 */
	for (c_cs = 0; c_cs < max_cs; c_cs++)
		cs_mask = cs_mask | 1 << (20 + c_cs);

	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, 0, TRAINING_REG,
		      (0x80000040 | cs_mask), 0xffffffff));
	mdelay(100);

	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
		     (u32)0x80000000, TRAINING_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("polling failed for Old RL result\n"));
			return MV_FAIL;
		}
	}

	return MV_OK;
}
/*
 * Dynamic per bit read leveling
 */
int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 freq)
{
	u32 data, mask;
	u32 bus_num, if_id, cl_val, bit_num;
	u32 curr_numb, curr_min_delay;
	int adll_array[3] = { 0, -0xa, 0x14 };
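	/*
	 * adll_array holds the ADLL offsets tried relative to the saved
	 * read-centralization value (phyreg3_arr) on successive passes of
	 * the retry loop below, for pups that have not yet converged.
	 */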
	u32 phyreg3_arr[MAX_INTERFACE_NUM][MAX_BUS_NUM];
	enum hws_speed_bin speed_bin_index;
	int is_any_pup_fail = 0;
	int break_loop = 0;
	u32 cs_enable_reg_val[MAX_INTERFACE_NUM]; /* save current CS value */
	u32 data_read[MAX_INTERFACE_NUM];
	int per_bit_rl_pup_status[MAX_INTERFACE_NUM][MAX_BUS_NUM];
	u32 data2_write[MAX_INTERFACE_NUM][MAX_BUS_NUM];
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_num = 0;
		     bus_num < tm->num_of_bus_per_interface; bus_num++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);
			per_bit_rl_pup_status[if_id][bus_num] = 0;
			data2_write[if_id][bus_num] = 0;
			/* read current value of phy register 0x3 */
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_num, DDR_PHY_DATA,
				      READ_CENTRALIZATION_PHY_REG,
				      &phyreg3_arr[if_id][bus_num]));
		}
	}
	/* NEW RL machine */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		training_result[training_stage][if_id] = TEST_SUCCESS;

		/* save current cs enable reg val */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, &cs_enable_reg_val[if_id],
			      MASK_ALL_BITS));
		/* enable single cs */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, (1 << 3), (1 << 3)));
	}

	ddr3_tip_reset_fifo_ptr(dev_num);
	for (curr_numb = 0; curr_numb < 3; curr_numb++) {
		/*
		 * Phase 1: Load pattern (using ODPG)
		 *
		 * enter Read Leveling mode
		 * only 27 bits are masked
		 * assuming non multi-CS configuration
		 * write to CS = 0 for the non multi CS configuration, note that
		 * the results shall be read back to the required CS !!!
		 */

		/* BUS count is 0 shifted 26 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0x3, 0x3));
		CHECK_STATUS(ddr3_tip_configure_odpg
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0,
			      pattern_table[PATTERN_TEST].num_of_phases_tx, 0,
			      pattern_table[PATTERN_TEST].num_of_phases_rx, 0,
			      0, 0, STRESS_NONE, DURATION_SINGLE));

		/* load pattern to ODPG */
		ddr3_tip_load_pattern_to_odpg(dev_num, ACCESS_TYPE_MULTICAST,
					      PARAM_NOT_CARE, PATTERN_TEST,
					      pattern_table[PATTERN_TEST].
					      start_addr);

		/*
		 * Phase 2: ODPG to Read Leveling mode
		 */

		/* General Training Opcode register */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_WRITE_READ_MODE_ENABLE_REG, 0,
			      MASK_ALL_BITS));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_CONTROL_REG, 0x301b01, 0x3c3fef));

		/* Object1 opcode register 0 & 1 */
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			speed_bin_index =
				tm->interface_params[if_id].speed_bin_index;
			cl_val =
				cas_latency_table[speed_bin_index].cl_val[freq];
			data = (cl_val << 17) | (0x3 << 25);
			mask = (0xff << 9) | (0x1f << 17) | (0x3 << 25);
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ODPG_OBJ1_OPCODE_REG, data, mask));
		}

		/* Set iteration count to max value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_OPCODE_1_REG, 0xd00, 0xd00));

		/*
		 * Phase 2: Mask config
		 */
		ddr3_tip_dynamic_per_bit_read_leveling_seq(dev_num);

		/*
		 * Phase 3: Read Leveling execution
		 */

		/* temporary jira dunit=14751 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_DBG_1_REG, 0, (u32)(1 << 31)));
		/* configure phy reset value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_DBG_3_REG, (0x7f << 24),
			      (u32)(0xff << 24)));
		/* data pup rd reset enable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      SDRAM_CONFIGURATION_REG, 0, (1 << 30)));
		/* data pup rd reset disable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      SDRAM_CONFIGURATION_REG, (1 << 30), (1 << 30)));
		/* training SW override & training RL mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x1, 0x9));
		/* training enable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, (1 << 24) | (1 << 20),
			      (1 << 24) | (1 << 20)));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_REG, (u32)(1 << 31), (u32)(1 << 31)));

		/********* trigger training *******************/
		/* Trigger, poll on status and disable ODPG */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_TRIGGER_REG, 0x1, 0x1));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_STATUS_REG, 0x1, 0x1));

		/* check for training done + results pass */
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0x2, 0x2,
		     ODPG_TRAINING_STATUS_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("Training Done Failed\n"));
			return MV_FAIL;
		}

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id,
				      ODPG_TRAINING_TRIGGER_REG, data_read,
				      0x4));
			data = data_read[if_id];
			if (data != 0x0) {
				DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
					       ("Training Result Failed\n"));
			}
		}

		/* disable ODPG - back to functional mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_ENABLE_REG, 0x1 << ODPG_DISABLE_OFFS,
			      (0x1 << ODPG_DISABLE_OFFS)));
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0x0, 0x1,
		     ODPG_ENABLE_REG, MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("ODPG disable failed "));
			return MV_FAIL;
		}
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_DATA_CONTROL_REG, 0, MASK_ALL_BITS));

		/* double loop on bus, pup */
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			/* check training done */
			for (bus_num = 0;
			     bus_num < tm->num_of_bus_per_interface;
			     bus_num++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);

				if (per_bit_rl_pup_status[if_id][bus_num]
				    == 0) {
					curr_min_delay = 0;
					for (bit_num = 0; bit_num < 8;
					     bit_num++) {
						if (ddr3_tip_if_polling
						    (dev_num,
						     ACCESS_TYPE_UNICAST,
						     if_id, (1 << 25),
						     (1 << 25),
						     mask_results_dq_reg_map
						     [bus_num * 8 + bit_num],
						     MAX_POLLING_ITERATIONS) !=
						    MV_OK) {
							DEBUG_LEVELING
								(DEBUG_LEVEL_ERROR,
								 ("\n_r_l: DDR3 poll failed(2) for bus %d bit %d\n",
								  bus_num,
								  bit_num));
						} else {
							/* read result per pup */
							CHECK_STATUS
								(ddr3_tip_if_read
								 (dev_num,
								  ACCESS_TYPE_UNICAST,
								  if_id,
								  mask_results_dq_reg_map
								  [bus_num * 8 +
								   bit_num],
								  data_read,
								  MASK_ALL_BITS));
							data =
								(data_read
								 [if_id] &
								 0x1f) |
								((data_read
								  [if_id] &
								  0xe0) << 1);
							if (curr_min_delay == 0)
								curr_min_delay =
									data;
							else if (data <
								 curr_min_delay)
								curr_min_delay =
									data;
							if (data > data2_write[if_id][bus_num])
								data2_write
									[if_id]
									[bus_num] =
									data;
						}
					}

					if (data2_write[if_id][bus_num] <=
					    (curr_min_delay +
					     MAX_DQ_READ_LEVELING_DELAY)) {
						per_bit_rl_pup_status[if_id]
							[bus_num] = 1;
					}
				}
			}
		}

		/* check if there is a need to search for a new phyreg3 value */
		if (curr_numb < 2) {
			/* if there is a DLL that is not checked yet */
			for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
			     if_id++) {
				VALIDATE_ACTIVE(tm->if_act_mask, if_id);
				for (bus_num = 0;
				     bus_num < tm->num_of_bus_per_interface;
				     bus_num++) {
					VALIDATE_ACTIVE(tm->bus_act_mask,
							bus_num);
					if (per_bit_rl_pup_status[if_id]
					    [bus_num] != 1) {
						/* go to next ADLL value */
						CHECK_STATUS
							(ddr3_tip_bus_write
							 (dev_num,
							  ACCESS_TYPE_UNICAST,
							  if_id,
							  ACCESS_TYPE_UNICAST,
							  bus_num, DDR_PHY_DATA,
							  READ_CENTRALIZATION_PHY_REG,
							  (phyreg3_arr[if_id]
							   [bus_num] +
							   adll_array[curr_numb])));
						break_loop = 1;
						break;
					}
				}
				if (break_loop)
					break;
			}
		} /* if (curr_numb < 2) */
		if (!break_loop)
			break;
	} /* for (curr_numb = 0; curr_numb < 3; curr_numb++) */
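	/*
	 * A pup passes per-bit RL once the largest per-bit delay seen
	 * (data2_write) is within MAX_DQ_READ_LEVELING_DELAY of the
	 * smallest (curr_min_delay); that max delay is what gets committed
	 * to the RL PHY register below.
	 */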
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_num = 0; bus_num < tm->num_of_bus_per_interface;
		     bus_num++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);
			if (per_bit_rl_pup_status[if_id][bus_num] == 1)
				ddr3_tip_bus_write(dev_num,
						   ACCESS_TYPE_UNICAST,
						   if_id,
						   ACCESS_TYPE_UNICAST,
						   bus_num, DDR_PHY_DATA,
						   RL_PHY_REG +
						   CS_REG_VALUE(effective_cs),
						   data2_write[if_id]
						   [bus_num]);
			else
				is_any_pup_fail = 1;
		}

		/* TBD flow does not support multi CS */
		/*
		 * cs_bitmask = tm->interface_params[if_id].
		 * as_bus_params[bus_num].cs_bitmask;
		 */
		/* divide by 4 is used for retrieving the CS number */
		/*
		 * TBD BC2 - what is the PHY address for other
		 * CS ddr3_tip_write_cs_result() ???
		 */
		/*
		 * find what should be written to PHY
		 * - max delay that is less than threshold
		 */
		if (is_any_pup_fail == 1) {
			training_result[training_stage][if_id] = TEST_FAILED;
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	DEBUG_LEVELING(DEBUG_LEVEL_INFO, ("RL exit read leveling\n"));

	/*
	 * Phase 3: Exit Read Leveling
	 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      TRAINING_SW_2_REG, (1 << 3), (1 << 3)));
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      TRAINING_SW_1_REG, (1 << 16), (1 << 16)));
	/* set ODPG to functional */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ODPG_DATA_CONTROL_REG, 0x0, MASK_ALL_BITS));

	/*
	 * Copy the result from the effective CS search to the real
	 * Functional CS
	 */
	ddr3_tip_write_cs_result(dev_num, RL_PHY_REG);
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ODPG_DATA_CONTROL_REG, 0x0, MASK_ALL_BITS));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/* restore cs enable value */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, cs_enable_reg_val[if_id],
			      MASK_ALL_BITS));
		if (odt_config != 0) {
			CHECK_STATUS(ddr3_tip_write_additional_odt_setting
				     (dev_num, if_id));
		}
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (training_result[training_stage][if_id] == TEST_FAILED)
			return MV_FAIL;
	}

	return MV_OK;
}
int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs,
			  u32 *cs_mask)
{
	u32 all_bus_cs = 0, same_bus_cs;
	u32 bus_cnt;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	*cs_mask = same_bus_cs = CS_BIT_MASK;

	/*
	 * In some devices (such as BC2) the CS is per pup, so mixed mode
	 * is valid there, unlike other devices where the CS configuration
	 * is per interface.
	 * To detect that, we OR and AND the CS bitmasks of all the pups.
	 * If they are not all the same, it is mixed mode and all CS should
	 * be configured (when configuring the MRS).
	 */
	for (bus_cnt = 0; bus_cnt < tm->num_of_bus_per_interface; bus_cnt++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
		all_bus_cs |= tm->interface_params[if_id].
			as_bus_params[bus_cnt].cs_bitmask;
		same_bus_cs &= tm->interface_params[if_id].
			as_bus_params[bus_cnt].cs_bitmask;

		/* cs enable is active low */
		*cs_mask &= ~tm->interface_params[if_id].
			as_bus_params[bus_cnt].cs_bitmask;
	}

	if (all_bus_cs == same_bus_cs)
		*cs_mask = (*cs_mask | (~(1 << effective_cs))) & CS_BIT_MASK;

	return MV_OK;
}
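/*
 * Worked example (illustrative, assuming CS_BIT_MASK = 0xf and a uniform
 * topology where every pup reports cs_bitmask = 0x1, i.e. CS0 only): the
 * loop clears bit 0 of *cs_mask, giving 0xe; since all_bus_cs equals
 * same_bus_cs, the final statement keeps every bit except effective_cs,
 * so for effective_cs = 0 the (active-low) result stays 0xe and only CS0
 * is selected when the MRS command is issued.
 */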
/*
 * Dynamic write leveling
 */
int ddr3_tip_dynamic_write_leveling(u32 dev_num)
{
	u32 reg_data = 0, iter, if_id, bus_cnt;
	u32 cs_enable_reg_val[MAX_INTERFACE_NUM] = { 0 };
	u32 cs_mask[MAX_INTERFACE_NUM];
	u32 read_data_sample_delay_vals[MAX_INTERFACE_NUM] = { 0 };
	u32 read_data_ready_delay_vals[MAX_INTERFACE_NUM] = { 0 };
	/* 0 for failure */
	u32 res_values[MAX_INTERFACE_NUM * MAX_BUS_NUM] = { 0 };
	u32 test_res = 0;	/* 0 - success for all pup */
	u32 data_read[MAX_INTERFACE_NUM];
	u8 wl_values[NUM_OF_CS][MAX_BUS_NUM][MAX_INTERFACE_NUM];
	u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	u32 cs_mask0[MAX_INTERFACE_NUM] = { 0 };
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		training_result[training_stage][if_id] = TEST_SUCCESS;

		/* save Read Data Sample Delay */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      READ_DATA_SAMPLE_DELAY,
			      read_data_sample_delay_vals, MASK_ALL_BITS));
		/* save Read Data Ready Delay */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      READ_DATA_READY_DELAY, read_data_ready_delay_vals,
			      MASK_ALL_BITS));
		/* save current cs reg val */
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
	}

	/*
	 * Phase 1: DRAM 2 Write Leveling mode
	 */

	/* Assert 10 refresh commands to DRAM, to all CS */
	for (iter = 0; iter < WL_ITERATION_NUM; iter++) {
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, SDRAM_OPERATION_REG,
				      (u32)((~(0xf) << 8) | 0x2), 0xf1f));
		}
	}
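	/*
	 * Each SDRAM_OPERATION_REG write above is assumed to issue a
	 * refresh command (opcode 0x2 in the low bits) with the CS select
	 * bits [11:8] cleared - active low, so all chip-selects are
	 * targeted - repeated WL_ITERATION_NUM (10) times before WL mode
	 * is entered.
	 */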
	/* check controller back to normal */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f,
		     SDRAM_OPERATION_REG, MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("WL: DDR3 poll failed(3)"));
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		/* enable write leveling to all cs - Q off, WL on */
		/* calculate interface cs mask */
		CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask0, MRS1_CMD,
						    0x1000, 0x1080));

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			/* cs enable is active low */
			ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs,
					      &cs_mask[if_id]);
		}

		/* Enable Output buffer to relevant CS - Q on, WL on */
		CHECK_STATUS(ddr3_tip_write_mrs_cmd
			     (dev_num, cs_mask, MRS1_CMD, 0x80, 0x1080));

		/* enable odt for relevant CS */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      0x1498, (0x3 << (effective_cs * 2)), 0xf));

		/*
		 * Phase 2: Set training IP to write leveling mode
		 */
		CHECK_STATUS(ddr3_tip_dynamic_write_leveling_seq(dev_num));

		/*
		 * Phase 3: Trigger training
		 */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ODPG_TRAINING_TRIGGER_REG, 0x1, 0x1));

		for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);

			/* training done */
			if (ddr3_tip_if_polling
			    (dev_num, ACCESS_TYPE_UNICAST, if_id,
			     (1 << 1), (1 << 1), ODPG_TRAINING_STATUS_REG,
			     MAX_POLLING_ITERATIONS) != MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_ERROR,
					("WL: DDR3 poll (4) failed (Data: 0x%x)\n",
					 reg_data));
			}
#if !defined(CONFIG_ARMADA_38X)	/* Disabled. JIRA #1498 */
			else {
				CHECK_STATUS(ddr3_tip_if_read
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id,
					      ODPG_TRAINING_TRIGGER_REG,
					      &reg_data, (1 << 2)));
				if (reg_data != 0) {
					DEBUG_LEVELING(
						DEBUG_LEVEL_ERROR,
						("WL: WL failed IF %d reg_data=0x%x\n",
						 if_id, reg_data));
				}
			}
#endif
		}

		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			/* training done */
			if (ddr3_tip_if_polling
			    (dev_num, ACCESS_TYPE_UNICAST, if_id,
			     (1 << 1), (1 << 1), ODPG_TRAINING_STATUS_REG,
			     MAX_POLLING_ITERATIONS) != MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_ERROR,
					("WL: DDR3 poll (4) failed (Data: 0x%x)\n",
					 reg_data));
			} else {
#if !defined(CONFIG_ARMADA_38X)	/* Disabled. JIRA #1498 */
				CHECK_STATUS(ddr3_tip_if_read
					     (dev_num, ACCESS_TYPE_UNICAST,
					      if_id,
					      ODPG_TRAINING_STATUS_REG,
					      data_read, (1 << 2)));
				reg_data = data_read[if_id];
				if (reg_data != 0) {
					DEBUG_LEVELING(
						DEBUG_LEVEL_ERROR,
						("WL: WL failed IF %d reg_data=0x%x\n",
						 if_id, reg_data));
				}
#endif

				/* check for training completion per bus */
				for (bus_cnt = 0;
				     bus_cnt < tm->num_of_bus_per_interface;
				     bus_cnt++) {
					VALIDATE_ACTIVE(tm->bus_act_mask,
							bus_cnt);
					/* training status */
					CHECK_STATUS(ddr3_tip_if_read
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id,
						      mask_results_pup_reg_map
						      [bus_cnt], data_read,
						      (1 << 25)));
					reg_data = data_read[if_id];
					DEBUG_LEVELING(
						DEBUG_LEVEL_TRACE,
						("WL: IF %d BUS %d reg 0x%x\n",
						 if_id, bus_cnt, reg_data));
					if (reg_data == 0) {
						res_values[
							(if_id *
							 tm->num_of_bus_per_interface)
							+ bus_cnt] = 1;
					}
					CHECK_STATUS(ddr3_tip_if_read
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id,
						      mask_results_pup_reg_map
						      [bus_cnt], data_read,
						      0xff));
					/*
					 * Save the read value that should be
					 * written to the PHY register
					 */
					wl_values[effective_cs]
						[bus_cnt][if_id] =
						(u8)data_read[if_id];
				}
			}
		}
		/*
		 * Phase 4: Exit write leveling mode
		 */

		/* disable DQs toggling */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      WR_LEVELING_DQS_PATTERN_REG, 0x0, 0x1));

		/* Update MRS 1 (WL off) */
		CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask0, MRS1_CMD,
						    0x1000, 0x1080));

		/* Update MRS 1 (return to functional mode - Q on, WL off) */
		CHECK_STATUS(ddr3_tip_write_mrs_cmd
			     (dev_num, cs_mask0, MRS1_CMD, 0x0, 0x1080));

		/* set phy to normal mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x5, 0x7));

		/* exit sw override mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x4, 0x7));
	}

	/*
	 * Phase 5: Load WL values to each PHY
	 */
	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			test_res = 0;
			for (bus_cnt = 0;
			     bus_cnt < tm->num_of_bus_per_interface;
			     bus_cnt++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
				/* check if result == pass */
				if (res_values
				    [(if_id *
				      tm->num_of_bus_per_interface) +
				     bus_cnt] == 0) {
					/*
					 * read result control register
					 * according to pup
					 */
					reg_data =
						wl_values[effective_cs][bus_cnt]
						[if_id];
					/*
					 * Write into write leveling register
					 * ([4:0] ADLL, [8:6] Phase, [15:10]
					 * (centralization) ADLL + 0x10)
					 */
					reg_data =
						(reg_data & 0x1f) |
						(((reg_data & 0xe0) >> 5) << 6) |
						(((reg_data & 0x1f) +
						  phy_reg1_val) << 10);
					ddr3_tip_bus_write(
						dev_num,
						ACCESS_TYPE_UNICAST,
						if_id,
						ACCESS_TYPE_UNICAST,
						bus_cnt,
						DDR_PHY_DATA,
						WL_PHY_REG +
						effective_cs *
						CS_REGISTER_ADDR_OFFSET,
						reg_data);
				} else {
					test_res = 1;
					/*
					 * read result control register
					 * according to pup
					 */
					CHECK_STATUS(ddr3_tip_if_read
						     (dev_num,
						      ACCESS_TYPE_UNICAST,
						      if_id,
						      mask_results_pup_reg_map
						      [bus_cnt], data_read,
						      0xff));
					reg_data = data_read[if_id];
					DEBUG_LEVELING(
						DEBUG_LEVEL_ERROR,
						("WL: IF %d BUS %d failed, reg 0x%x\n",
						 if_id, bus_cnt, reg_data));
				}
			}

			if (test_res != 0) {
				training_result[training_stage][if_id] =
					TEST_FAILED;
			}
		}
	}
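	/*
	 * Worked example of the repack above (illustrative, assuming a raw
	 * result byte of 0x6b and phy_reg1_val = 0x10): ADLL = 0x6b & 0x1f
	 * = 0xb stays in [4:0]; phase = (0x6b & 0xe0) >> 5 = 0x3 lands in
	 * [8:6] as 0x0c0; centralization = (0xb + 0x10) << 10 = 0x6c00;
	 * the value written is therefore 0x6ccb.
	 */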
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	/*
	 * Copy the result from the effective CS search to the real
	 * Functional CS
	 */
	/* ddr3_tip_write_cs_result(dev_num, WL_PHY_REG); */
	/* restore saved values */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/* restore Read Data Sample Delay */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      READ_DATA_SAMPLE_DELAY,
			      read_data_sample_delay_vals[if_id],
			      MASK_ALL_BITS));

		/* restore Read Data Ready Delay */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      READ_DATA_READY_DELAY,
			      read_data_ready_delay_vals[if_id],
			      MASK_ALL_BITS));

		/* enable multi cs */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      CS_ENABLE_REG, cs_enable_reg_val[if_id],
			      MASK_ALL_BITS));
	}

	/* Disable modt0 for CS0 training - need to adjust for multi CS */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, 0x1498,
		      0x0, 0xf));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (training_result[training_stage][if_id] == TEST_FAILED)
			return MV_FAIL;
	}

	return MV_OK;
}
/*
 * Dynamic write leveling supplementary
 */
int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num)
{
	int adll_offset;
	u32 if_id, bus_id, data, data_tmp;
	int is_if_fail = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		is_if_fail = 0;

		for (bus_id = 0; bus_id < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
			wr_supp_res[if_id][bus_id].is_pup_fail = 1;
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      effective_cs * CS_REGISTER_ADDR_OFFSET,
				      &data));
			DEBUG_LEVELING(
				DEBUG_LEVEL_TRACE,
				("WL Supp: adll_offset=0 data delay = %d\n",
				 data));
			if (ddr3_tip_wl_supp_align_phase_shift
			    (dev_num, if_id, bus_id, 0, 0) == MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_TRACE,
					("WL Supp: IF %d bus_id %d adll_offset=0 Success !\n",
					 if_id, bus_id));
				continue;
			}

			/* change adll */
			adll_offset = 5;
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      effective_cs * CS_REGISTER_ADDR_OFFSET,
				      data + adll_offset));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      effective_cs * CS_REGISTER_ADDR_OFFSET,
				      &data_tmp));
			DEBUG_LEVELING(
				DEBUG_LEVEL_TRACE,
				("WL Supp: adll_offset= %d data delay = %d\n",
				 adll_offset, data_tmp));
			if (ddr3_tip_wl_supp_align_phase_shift
			    (dev_num, if_id, bus_id, adll_offset, 0) == MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_TRACE,
					("WL Supp: IF %d bus_id %d adll_offset= %d Success !\n",
					 if_id, bus_id, adll_offset));
				continue;
			}

			/* change adll */
			adll_offset = -5;
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ACCESS_TYPE_UNICAST, bus_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      effective_cs * CS_REGISTER_ADDR_OFFSET,
				      data + adll_offset));
			CHECK_STATUS(ddr3_tip_bus_read
				     (dev_num, if_id, ACCESS_TYPE_UNICAST,
				      bus_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      effective_cs * CS_REGISTER_ADDR_OFFSET,
				      &data_tmp));
			DEBUG_LEVELING(
				DEBUG_LEVEL_TRACE,
				("WL Supp: adll_offset= %d data delay = %d\n",
				 adll_offset, data_tmp));
			if (ddr3_tip_wl_supp_align_phase_shift
			    (dev_num, if_id, bus_id, adll_offset, 0) == MV_OK) {
				DEBUG_LEVELING(
					DEBUG_LEVEL_TRACE,
					("WL Supp: IF %d bus_id %d adll_offset= %d Success !\n",
					 if_id, bus_id, adll_offset));
				continue;
			} else {
				DEBUG_LEVELING(
					DEBUG_LEVEL_ERROR,
					("WL Supp: IF %d bus_id %d Failed !\n",
					 if_id, bus_id));
				is_if_fail = 1;
			}
		}

		DEBUG_LEVELING(DEBUG_LEVEL_TRACE,
			       ("WL Supp: IF %d bus_id %d is_pup_fail %d\n",
				if_id, bus_id, is_if_fail));
		if (is_if_fail == 1) {
			DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
				       ("WL Supp: IF %d failed\n", if_id));
			training_result[training_stage][if_id] = TEST_FAILED;
		} else {
			training_result[training_stage][if_id] = TEST_SUCCESS;
		}
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (training_result[training_stage][if_id] == TEST_FAILED)
			return MV_FAIL;
	}

	return MV_OK;
}
/*
 * Phase Shift
 */
static int ddr3_tip_wl_supp_align_phase_shift(u32 dev_num, u32 if_id,
					      u32 bus_id, u32 offset,
					      u32 bus_id_delta)
{
	wr_supp_res[if_id][bus_id].stage = PHASE_SHIFT;
	if (ddr3_tip_xsb_compare_test(dev_num, if_id, bus_id,
				      0, bus_id_delta) == MV_OK) {
		wr_supp_res[if_id][bus_id].is_pup_fail = 0;
		return MV_OK;
	} else if (ddr3_tip_xsb_compare_test(dev_num, if_id, bus_id,
					     ONE_CLOCK_ERROR_SHIFT,
					     bus_id_delta) == MV_OK) {
		/* 1 clock error */
		wr_supp_res[if_id][bus_id].stage = CLOCK_SHIFT;
		DEBUG_LEVELING(DEBUG_LEVEL_TRACE,
			       ("Supp: 1 error clock for if %d pup %d with offset %d success\n",
				if_id, bus_id, offset));
		ddr3_tip_wl_supp_one_clk_err_shift(dev_num, if_id, bus_id, 0);
		wr_supp_res[if_id][bus_id].is_pup_fail = 0;
		return MV_OK;
	} else if (ddr3_tip_xsb_compare_test(dev_num, if_id, bus_id,
					     ALIGN_ERROR_SHIFT,
					     bus_id_delta) == MV_OK) {
		/* align error */
		DEBUG_LEVELING(DEBUG_LEVEL_TRACE,
			       ("Supp: align error for if %d pup %d with offset %d success\n",
				if_id, bus_id, offset));
		wr_supp_res[if_id][bus_id].stage = ALIGN_SHIFT;
		ddr3_tip_wl_supp_align_err_shift(dev_num, if_id, bus_id, 0);
		wr_supp_res[if_id][bus_id].is_pup_fail = 0;
		return MV_OK;
	} else {
		wr_supp_res[if_id][bus_id].is_pup_fail = 1;
		return MV_FAIL;
	}
}
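/*
 * The function above classifies each pup by re-running the compare test
 * with three word offsets: 0 (already aligned), ONE_CLOCK_ERROR_SHIFT
 * (+2, read data off by one clock, corrected by shifting the WL phase
 * forward), and ALIGN_ERROR_SHIFT (-2, an alignment error, corrected by
 * shifting the WL result back). Only when all three comparisons fail is
 * the pup marked as failed; the direction implied by each shift follows
 * from the helper names and is an interpretation, not stated explicitly.
 */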

/*
 * Compare test - write a known pattern, read it back and compare it word
 * by word under the byte-lane mask, optionally shifted by edge_offset
 */
static int ddr3_tip_xsb_compare_test(u32 dev_num, u32 if_id, u32 bus_id,
                                     u32 edge_offset, u32 bus_id_delta)
{
        u32 num_of_succ_byte_compare, word_in_pattern, abs_offset;
        int word_offset;
        u32 i;
        u32 read_pattern[TEST_PATTERN_LENGTH * 2];
        struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
        u32 pattern_test_pattern_table[8];

        for (i = 0; i < 8; i++) {
                pattern_test_pattern_table[i] =
                        pattern_table_get_word(dev_num, PATTERN_TEST, (u8)i);
        }

        /* external write, then read back and compare */
        CHECK_STATUS(ddr3_tip_ext_write
                     (dev_num, if_id,
                      (pattern_table[PATTERN_TEST].start_addr +
                       ((SDRAM_CS_SIZE + 1) * effective_cs)), 1,
                      pattern_test_pattern_table));

        CHECK_STATUS(ddr3_tip_reset_fifo_ptr(dev_num));

        CHECK_STATUS(ddr3_tip_ext_read
                     (dev_num, if_id,
                      (pattern_table[PATTERN_TEST].start_addr +
                       ((SDRAM_CS_SIZE + 1) * effective_cs)), 1, read_pattern));

        DEBUG_LEVELING(
                DEBUG_LEVEL_TRACE,
                ("XSB-compt: IF %d bus_id %d 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                 if_id, bus_id, read_pattern[0], read_pattern[1],
                 read_pattern[2], read_pattern[3], read_pattern[4],
                 read_pattern[5], read_pattern[6], read_pattern[7]));

        /* compare byte per pup */
        num_of_succ_byte_compare = 0;
        for (word_in_pattern = start_xsb_offset;
             word_in_pattern < (TEST_PATTERN_LENGTH * 2); word_in_pattern++) {
                /*
                 * edge_offset may encode a negative shift; do the index
                 * arithmetic in a signed variable so the range check below
                 * actually works (a u32 can never be < 0)
                 */
                word_offset = (int)(word_in_pattern + edge_offset);
                if ((word_offset > ((int)(TEST_PATTERN_LENGTH * 2) - 1)) ||
                    (word_offset < 0))
                        continue;

                if ((read_pattern[word_in_pattern] & pup_mask_table[bus_id]) ==
                    (pattern_test_pattern_table[word_offset] &
                     pup_mask_table[bus_id]))
                        num_of_succ_byte_compare++;
        }

        /* treat edge_offset as signed when taking its magnitude */
        abs_offset = ((int)edge_offset > 0) ? edge_offset : -edge_offset;
        if (num_of_succ_byte_compare == ((TEST_PATTERN_LENGTH * 2) -
                                         abs_offset - start_xsb_offset)) {
                DEBUG_LEVELING(
                        DEBUG_LEVEL_TRACE,
                        ("XSB-compt: IF %d bus_id %d num_of_succ_byte_compare %d - Success\n",
                         if_id, bus_id, num_of_succ_byte_compare));
                return MV_OK;
        } else {
                DEBUG_LEVELING(
                        DEBUG_LEVEL_TRACE,
                        ("XSB-compt: IF %d bus_id %d num_of_succ_byte_compare %d - Fail!\n",
                         if_id, bus_id, num_of_succ_byte_compare));
                DEBUG_LEVELING(
                        DEBUG_LEVEL_TRACE,
                        ("XSB-compt: expected 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                         pattern_test_pattern_table[0],
                         pattern_test_pattern_table[1],
                         pattern_test_pattern_table[2],
                         pattern_test_pattern_table[3],
                         pattern_test_pattern_table[4],
                         pattern_test_pattern_table[5],
                         pattern_test_pattern_table[6],
                         pattern_test_pattern_table[7]));
                DEBUG_LEVELING(
                        DEBUG_LEVEL_TRACE,
                        ("XSB-compt: received 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
                         read_pattern[0], read_pattern[1],
                         read_pattern[2], read_pattern[3],
                         read_pattern[4], read_pattern[5],
                         read_pattern[6], read_pattern[7]));
                return MV_FAIL;
        }
}
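
/*
 * Worked example of the pass criterion above (illustrative numbers, not
 * taken from the driver): with TEST_PATTERN_LENGTH = 4, start_xsb_offset
 * = 0 and edge_offset = -1, read words 1..7 are compared against expected
 * words 0..6, so a fully matching lane scores 7 successful compares --
 * exactly (TEST_PATTERN_LENGTH * 2) - abs_offset - start_xsb_offset.
 */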

/*
 * Clock error shift - moves the write leveling delay one clock cycle forward
 */
static int ddr3_tip_wl_supp_one_clk_err_shift(u32 dev_num, u32 if_id,
                                              u32 bus_id, u32 bus_id_delta)
{
        int phase, adll;
        u32 data;

        DEBUG_LEVELING(DEBUG_LEVEL_TRACE, ("One_clk_err_shift\n"));

        CHECK_STATUS(ddr3_tip_bus_read
                     (dev_num, if_id, ACCESS_TYPE_UNICAST, bus_id,
                      DDR_PHY_DATA, WL_PHY_REG, &data));
        phase = ((data >> 6) & 0x7);
        adll = data & 0x1f;
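
        /*
         * Assumed WL_PHY_REG layout, inferred from the decode above rather
         * than from a datasheet: bits [4:0] carry the ADLL fine delay and
         * bits [8:6] carry the coarse phase; bit 5 is left untouched here.
         */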
        DEBUG_LEVELING(DEBUG_LEVEL_TRACE,
                       ("One_clk_err_shift: IF %d bus_id %d phase %d adll %d\n",
                        if_id, bus_id, phase, adll));

        if ((phase == 0) || (phase == 1)) {
                CHECK_STATUS(ddr3_tip_bus_read_modify_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_id,
                              DDR_PHY_DATA, 0, (phase + 2), 0x1f));
        } else if (phase == 2) {
                if (adll < 6) {
                        data = (3 << 6) + (0x1f);
                        CHECK_STATUS(ddr3_tip_bus_read_modify_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      bus_id, DDR_PHY_DATA, 0, data,
                                      (0x7 << 6 | 0x1f)));
                        data = 0x2f;
                        CHECK_STATUS(ddr3_tip_bus_read_modify_write
                                     (dev_num, ACCESS_TYPE_UNICAST, if_id,
                                      bus_id, DDR_PHY_DATA, 1, data, 0x3f));
                }
        } else {
                /* phase 3 - no room to shift another clock forward */
                return MV_FAIL;
        }

        return MV_OK;
}
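
/*
 * For reference, a hypothetical helper (wl_phy_reg_pack() is a made-up
 * name, not part of this driver) that packs a coarse phase and an ADLL
 * value into the WL_PHY_REG format the two shift routines manipulate.
 * Kept under #if 0 since it is only an illustrative sketch.
 */
#if 0
static inline u32 wl_phy_reg_pack(u32 phase, u32 adll)
{
        /* [8:6] coarse phase, [4:0] ADLL fine delay, bit 5 left clear */
        return ((phase & 0x7) << 6) | (adll & 0x1f);
}
#endif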

/*
 * Align error shift - shifts the write leveling result back to recover
 * from an alignment error
 */
static int ddr3_tip_wl_supp_align_err_shift(u32 dev_num, u32 if_id,
                                            u32 bus_id, u32 bus_id_delta)
{
        int phase, adll;
        u32 data;

        /* Shift WL result 1 phase back */
        CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id, ACCESS_TYPE_UNICAST,
                                       bus_id, DDR_PHY_DATA, WL_PHY_REG,
                                       &data));
        phase = ((data >> 6) & 0x7);
        adll = data & 0x1f;
        DEBUG_LEVELING(
                DEBUG_LEVEL_TRACE,
                ("Wl_supp_align_err_shift: IF %d bus_id %d phase %d adll %d\n",
                 if_id, bus_id, phase, adll));

        if (phase < 2) {
                if (adll > 0x1a) {
                        if (phase == 0)
                                return MV_FAIL;

                        if (phase == 1) {
                                data = 0;
                                CHECK_STATUS(ddr3_tip_bus_read_modify_write
                                             (dev_num, ACCESS_TYPE_UNICAST,
                                              if_id, bus_id, DDR_PHY_DATA,
                                              0, data, (0x7 << 6 | 0x1f)));
                                data = 0xf;
                                CHECK_STATUS(ddr3_tip_bus_read_modify_write
                                             (dev_num, ACCESS_TYPE_UNICAST,
                                              if_id, bus_id, DDR_PHY_DATA,
                                              1, data, 0x1f));
                                return MV_OK;
                        }
                } else {
                        return MV_FAIL;
                }
        } else if ((phase == 2) || (phase == 3)) {
                phase = phase - 2;
                data = (phase << 6) + (adll & 0x1f);
                CHECK_STATUS(ddr3_tip_bus_read_modify_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, bus_id,
                              DDR_PHY_DATA, 0, data, (0x7 << 6 | 0x1f)));
                return MV_OK;
        } else {
                DEBUG_LEVELING(DEBUG_LEVEL_ERROR,
                               ("Wl_supp_align_err_shift: unexpected phase\n"));
                return MV_FAIL;
        }

        return MV_OK;
}
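
/*
 * Summary of the transitions implemented above, derived from the code
 * itself rather than from vendor documentation:
 *   phase 0, any adll     -> fail (nothing earlier to shift back to)
 *   phase 1, adll > 0x1a  -> phase 0, adll 0 (plus second-reg update)
 *   phase 1, adll <= 0x1a -> fail
 *   phase 2 or 3          -> phase - 2, adll unchanged
 */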

/*
 * Dynamic write leveling sequence
 */
static int ddr3_tip_dynamic_write_leveling_seq(u32 dev_num)
{
        u32 bus_id, dq_id;
        u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map();
        u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
        struct hws_topology_map *tm = ddr3_get_topology_map();

        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      TRAINING_SW_2_REG, 0x1, 0x5));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      TRAINING_WRITE_LEVELING_REG, 0x50, 0xff));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      TRAINING_WRITE_LEVELING_REG, 0x5c, 0xff));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      ODPG_TRAINING_CONTROL_REG, 0x381b82, 0x3c3faf));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      ODPG_OBJ1_OPCODE_REG, (0x3 << 25), (0x3ffff << 9)));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      ODPG_OBJ1_ITER_CNT_REG, 0x80, 0xffff));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      ODPG_WRITE_LEVELING_DONE_CNTR_REG, 0x14, 0xff));
        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      TRAINING_WRITE_LEVELING_REG, 0xff5c, 0xffff));

        /* mask PBS */
        for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_dq_reg_map[dq_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Mask all results */
        for (bus_id = 0; bus_id < tm->num_of_bus_per_interface; bus_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_pup_reg_map[bus_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Unmask only the results of active byte lanes */
        for (bus_id = 0; bus_id < tm->num_of_bus_per_interface; bus_id++) {
                VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_pup_reg_map[bus_id], 0, 0x1 << 24));
        }

        CHECK_STATUS(ddr3_tip_if_write
                     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                      WR_LEVELING_DQS_PATTERN_REG, 0x1, 0x1));

        return MV_OK;
}
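
/*
 * The three loops above follow a recurring idiom in this file: set the
 * mask bit (bit 24) in every result register, then clear it only for the
 * lanes that are actually active. A hypothetical helper capturing the
 * idiom might look like the sketch below; hws_set_result_mask() is a
 * made-up name and the sketch is kept under #if 0.
 */
#if 0
static int hws_set_result_mask(u32 dev_num, u16 *reg_map, u32 count,
                               int masked)
{
        u32 i;

        for (i = 0; i < count; i++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              reg_map[i], (masked ? 0x1 : 0x0) << 24,
                              0x1 << 24));
        }

        return MV_OK;
}
#endif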

/*
 * Dynamic read leveling sequence
 */
static int ddr3_tip_dynamic_read_leveling_seq(u32 dev_num)
{
        u32 bus_id, dq_id;
        u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map();
        u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* mask PBS */
        for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_dq_reg_map[dq_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Mask all results */
        for (bus_id = 0; bus_id < tm->num_of_bus_per_interface; bus_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_pup_reg_map[bus_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Unmask only the results of active byte lanes */
        for (bus_id = 0; bus_id < tm->num_of_bus_per_interface; bus_id++) {
                VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_pup_reg_map[bus_id], 0, 0x1 << 24));
        }

        return MV_OK;
}

/*
 * Dynamic per-bit read leveling sequence
 */
static int ddr3_tip_dynamic_per_bit_read_leveling_seq(u32 dev_num)
{
        u32 bus_id, dq_id;
        u16 *mask_results_pup_reg_map = ddr3_tip_get_mask_results_pup_reg_map();
        u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* mask PBS */
        for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_dq_reg_map[dq_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Mask all results */
        for (bus_id = 0; bus_id < tm->num_of_bus_per_interface; bus_id++) {
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_pup_reg_map[bus_id], 0x1 << 24,
                              0x1 << 24));
        }

        /* Unmask only the per-DQ results of active byte lanes (8 DQ per lane) */
        for (dq_id = 0; dq_id < MAX_DQ_NUM; dq_id++) {
                VALIDATE_ACTIVE(tm->bus_act_mask, dq_id / 8);
                CHECK_STATUS(ddr3_tip_if_write
                             (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
                              mask_results_dq_reg_map[dq_id], 0x0 << 24,
                              0x1 << 24));
        }

        return MV_OK;
}
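
/*
 * Note: this sequence differs from ddr3_tip_dynamic_read_leveling_seq()
 * only in the unmask step -- it unmasks the per-DQ result registers (one
 * per data bit) instead of the per-pup ones, which is what makes the
 * training per-bit.
 */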

/*
 * Print write leveling supplementary results
 */
int ddr3_tip_print_wl_supp_result(u32 dev_num)
{
        u32 bus_id = 0, if_id = 0;
        struct hws_topology_map *tm = ddr3_get_topology_map();

        DEBUG_LEVELING(DEBUG_LEVEL_INFO,
                       ("I/F0 PUP0 Result[0 - success, 1 - fail] ...\n"));
        for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
                VALIDATE_ACTIVE(tm->if_act_mask, if_id);
                for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
                     bus_id++) {
                        VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
                        DEBUG_LEVELING(DEBUG_LEVEL_INFO,
                                       ("%d ,",
                                        wr_supp_res[if_id][bus_id].is_pup_fail));
                }
        }

        DEBUG_LEVELING(
                DEBUG_LEVEL_INFO,
                ("I/F0 PUP0 Stage[0 - phase_shift, 1 - clock_shift, 2 - align_shift] ...\n"));
        for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
                VALIDATE_ACTIVE(tm->if_act_mask, if_id);
                for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
                     bus_id++) {
                        VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
                        DEBUG_LEVELING(DEBUG_LEVEL_INFO,
                                       ("%d ,",
                                        wr_supp_res[if_id][bus_id].stage));
                }
        }

        return MV_OK;
}