// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"

#define GET_MAX_VALUE(x, y)			\
	((x) > (y)) ? (x) : (y)
#define CEIL_DIVIDE(x, y)			\
	((x - (x / y) * y) == 0) ? ((x / y) - 1) : (x / y)

#define TIME_2_CLOCK_CYCLES	CEIL_DIVIDE
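/*
 * Editor's note on semantics (illustration, not in the original source):
 * despite its name, CEIL_DIVIDE(x, y) yields ceil(x / y) - 1, i.e. the
 * "minus one" encoding that the D-unit timing fields expect. Examples:
 *	TIME_2_CLOCK_CYCLES(30000, 2500) = 12 - 1 = 11	(exact division)
 *	TIME_2_CLOCK_CYCLES(30000, 1499) = 20		(= ceil(20.01) - 1)
 */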
#define GET_CS_FROM_MASK(mask)	(cs_mask2_num[mask])
#define CS_CBE_VALUE(cs_num)	(cs_cbe_reg[cs_num])

#define TIMES_9_TREFI_CYCLES	0x8

u32 window_mem_addr = 0;
u32 phy_reg0_val = 0;
u32 phy_reg1_val = 8;
u32 phy_reg2_val = 0;
u32 phy_reg3_val = 0xa;
enum hws_ddr_freq init_freq = DDR_FREQ_667;
enum hws_ddr_freq low_freq = DDR_FREQ_LOW_FREQ;
enum hws_ddr_freq medium_freq;
u32 debug_dunit = 0;
u32 odt_additional = 1;
u32 *dq_map_table = NULL;
u32 odt_config = 1;

#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ALLEYCAT3) || \
	defined(CONFIG_ARMADA_39X)
u32 is_pll_before_init = 0, is_adll_calib_before_init = 0, is_dfs_in_init = 0;
u32 dfs_low_freq = 130;
#else
u32 is_pll_before_init = 0, is_adll_calib_before_init = 1, is_dfs_in_init = 0;
u32 dfs_low_freq = 100;
#endif
u32 g_rtt_nom_c_s0, g_rtt_nom_c_s1;
u8 calibration_update_control;	/* 2 = external only, 1 = internal only */

enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
enum auto_tune_stage training_stage = INIT_CONTROLLER;
u32 finger_test = 0, p_finger_start = 11, p_finger_end = 64,
	n_finger_start = 11, n_finger_end = 64,
	p_finger_step = 3, n_finger_step = 3;
u32 clamp_tbl[] = { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 };

/* Initialized to 0xff; this variable is defined by the user in debug mode */
u32 mode2_t = 0xff;
u32 xsb_validate_type = 0;
u32 xsb_validation_base_address = 0xf000;
u32 first_active_if = 0;
u32 dfs_low_phy1 = 0x1f;
u32 multicast_id = 0;
int use_broadcast = 0;
struct hws_tip_freq_config_info *freq_info_table = NULL;
u8 is_cbe_required = 0;
u32 debug_mode = 0;
u32 delay_enable = 0;
int rl_mid_freq_wa = 0;
u32 effective_cs = 0;

u32 mask_tune_func = (SET_MEDIUM_FREQ_MASK_BIT |
		      WRITE_LEVELING_MASK_BIT |
		      LOAD_PATTERN_2_MASK_BIT |
		      READ_LEVELING_MASK_BIT |
		      SET_TARGET_FREQ_MASK_BIT | WRITE_LEVELING_TF_MASK_BIT |
		      READ_LEVELING_TF_MASK_BIT |
		      CENTRALIZATION_RX_MASK_BIT | CENTRALIZATION_TX_MASK_BIT);

void ddr3_print_version(void)
{
	printf(DDR3_TIP_VERSION_STRING);
}

static int ddr3_tip_ddr3_training_main_flow(u32 dev_num);
static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type,
			      u32 if_id, u32 cl_value, u32 cwl_value);
static int ddr3_tip_ddr3_auto_tune(u32 dev_num);
static int is_bus_access_done(u32 dev_num, u32 if_id,
			      u32 dunit_reg_adrr, u32 bit);
#ifdef ODT_TEST_SUPPORT
static int odt_test(u32 dev_num, enum hws_algo_type algo_type);
#endif

int adll_calibration(u32 dev_num, enum hws_access_type access_type,
		     u32 if_id, enum hws_ddr_freq frequency);
static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
			       u32 if_id, enum hws_ddr_freq frequency);

static struct page_element page_param[] = {
	/*
	 * 8-bit		16-bit
	 * page-size(K)	page-size(K)	mask
	 */
	{ 1, 2, 2 },	/* 512M */
	{ 1, 2, 3 },	/* 1G */
	{ 1, 2, 0 },	/* 2G */
	{ 1, 2, 4 },	/* 4G */
	{ 2, 2, 5 }	/* 8G */
};
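/*
 * Editor's illustration (not part of the original driver): page_param is
 * indexed by enum hws_mem_size and the page-size field is chosen by the
 * DRAM bus width, e.g. for a 2 Gbit part on an 8-bit bus:
 *
 *	page_size = page_param[MEM_2G].page_size_8bit;	// 1 KB page
 *
 * hws_ddr3_tip_init_controller() below then uses page_size == 1 to pick
 * SPEED_BIN_TFAW1K rather than SPEED_BIN_TFAW2K for the t_faw lookup.
 */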
static u8 mem_size_config[MEM_SIZE_LAST] = {
	0x2,	/* 512Mbit */
	0x3,	/* 1Gbit */
	0x0,	/* 2Gbit */
	0x4,	/* 4Gbit */
	0x5	/* 8Gbit */
};

static u8 cs_mask2_num[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };

static struct reg_data odpg_default_value[] = {
	{0x1034, 0x38000, MASK_ALL_BITS},
	{0x1038, 0x0, MASK_ALL_BITS},
	{0x10b0, 0x0, MASK_ALL_BITS},
	{0x10b8, 0x0, MASK_ALL_BITS},
	{0x10c0, 0x0, MASK_ALL_BITS},
	{0x10f0, 0x0, MASK_ALL_BITS},
	{0x10f4, 0x0, MASK_ALL_BITS},
	{0x10f8, 0xff, MASK_ALL_BITS},
	{0x10fc, 0xffff, MASK_ALL_BITS},
	{0x1130, 0x0, MASK_ALL_BITS},
	{0x1830, 0x2000000, MASK_ALL_BITS},
	{0x14d0, 0x0, MASK_ALL_BITS},
	{0x14d4, 0x0, MASK_ALL_BITS},
	{0x14d8, 0x0, MASK_ALL_BITS},
	{0x14dc, 0x0, MASK_ALL_BITS},
	{0x1454, 0x0, MASK_ALL_BITS},
	{0x1594, 0x0, MASK_ALL_BITS},
	{0x1598, 0x0, MASK_ALL_BITS},
	{0x159c, 0x0, MASK_ALL_BITS},
	{0x15a0, 0x0, MASK_ALL_BITS},
	{0x15a4, 0x0, MASK_ALL_BITS},
	{0x15a8, 0x0, MASK_ALL_BITS},
	{0x15ac, 0x0, MASK_ALL_BITS},
	{0x1604, 0x0, MASK_ALL_BITS},
	{0x1608, 0x0, MASK_ALL_BITS},
	{0x160c, 0x0, MASK_ALL_BITS},
	{0x1610, 0x0, MASK_ALL_BITS},
	{0x1614, 0x0, MASK_ALL_BITS},
	{0x1618, 0x0, MASK_ALL_BITS},
	{0x1624, 0x0, MASK_ALL_BITS},
	{0x1690, 0x0, MASK_ALL_BITS},
	{0x1694, 0x0, MASK_ALL_BITS},
	{0x1698, 0x0, MASK_ALL_BITS},
	{0x169c, 0x0, MASK_ALL_BITS},
	{0x14b8, 0x6f67, MASK_ALL_BITS},
	{0x1630, 0x0, MASK_ALL_BITS},
	{0x1634, 0x0, MASK_ALL_BITS},
	{0x1638, 0x0, MASK_ALL_BITS},
	{0x163c, 0x0, MASK_ALL_BITS},
	{0x16b0, 0x0, MASK_ALL_BITS},
	{0x16b4, 0x0, MASK_ALL_BITS},
	{0x16b8, 0x0, MASK_ALL_BITS},
	{0x16bc, 0x0, MASK_ALL_BITS},
	{0x16c0, 0x0, MASK_ALL_BITS},
	{0x16c4, 0x0, MASK_ALL_BITS},
	{0x16c8, 0x0, MASK_ALL_BITS},
	{0x16cc, 0x1, MASK_ALL_BITS},
	{0x16f0, 0x1, MASK_ALL_BITS},
	{0x16f4, 0x0, MASK_ALL_BITS},
	{0x16f8, 0x0, MASK_ALL_BITS},
	{0x16fc, 0x0, MASK_ALL_BITS}
};
static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access,
			       u32 if_id, enum hws_access_type phy_access,
			       u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr,
			       u32 data_value, enum hws_operation oper_type);
static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id);
static int ddr3_tip_rank_control(u32 dev_num, u32 if_id);

/*
 * Update global training parameters with data supplied by the user
 */
int ddr3_tip_tune_training_params(u32 dev_num,
				  struct tune_train_params *params)
{
	if (params->ck_delay != -1)
		ck_delay = params->ck_delay;
	if (params->ck_delay_16 != -1)
		ck_delay_16 = params->ck_delay_16;
	if (params->phy_reg3_val != -1)
		phy_reg3_val = params->phy_reg3_val;

	return MV_OK;
}
/*
 * Configure CS
 */
int ddr3_tip_configure_cs(u32 dev_num, u32 if_id, u32 cs_num, u32 enable)
{
	u32 data, addr_hi, data_high;
	u32 mem_index;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (enable == 1) {
		data = (tm->interface_params[if_id].bus_width ==
			BUS_WIDTH_8) ? 0 : 1;
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      SDRAM_ACCESS_CONTROL_REG, (data << (cs_num * 4)),
			      0x3 << (cs_num * 4)));
		mem_index = tm->interface_params[if_id].memory_size;
		addr_hi = mem_size_config[mem_index] & 0x3;
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      SDRAM_ACCESS_CONTROL_REG,
			      (addr_hi << (2 + cs_num * 4)),
			      0x3 << (2 + cs_num * 4)));
		data_high = (mem_size_config[mem_index] & 0x4) >> 2;
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      SDRAM_ACCESS_CONTROL_REG,
			      data_high << (20 + cs_num), 1 << (20 + cs_num)));

		/* Enable Address Select Mode */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      SDRAM_ACCESS_CONTROL_REG, 1 << (16 + cs_num),
			      1 << (16 + cs_num)));
	}
	switch (cs_num) {
	case 0:
	case 1:
	case 2:
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      DDR_CONTROL_LOW_REG, (enable << (cs_num + 11)),
			      1 << (cs_num + 11)));
		break;
	case 3:
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      DDR_CONTROL_LOW_REG, (enable << 15), 1 << 15));
		break;
	}

	return MV_OK;
}
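/*
 * Editor's illustration (field positions inferred from the shifts in
 * ddr3_tip_configure_cs() above, not from a datasheet): each CS owns a
 * 4-bit nibble of SDRAM_ACCESS_CONTROL_REG. For cs_num = 1 on a 16-bit
 * bus with a 4 Gbit device (mem_size_config[] value 0x4):
 *	bits [5:4] = 1			(bus width != 8)
 *	bits [7:6] = 0x4 & 0x3 = 0	(addr_hi)
 *	bit  21    = 0x4 >> 2 = 1	(data_high, 20 + cs_num)
 *	bit  17    = 1			(address select mode, 16 + cs_num)
 */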
/*
 * Calculate number of CS
 */
static int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num)
{
	u32 cs;
	u32 bus_cnt;
	u32 cs_count;
	u32 cs_bitmask;
	u32 curr_cs_num = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
		cs_count = 0;
		cs_bitmask = tm->interface_params[if_id].
			as_bus_params[bus_cnt].cs_bitmask;
		for (cs = 0; cs < MAX_CS_NUM; cs++) {
			if ((cs_bitmask >> cs) & 1)
				cs_count++;
		}
		if (curr_cs_num == 0) {
			curr_cs_num = cs_count;
		} else if (cs_count != curr_cs_num) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("CS number is different per bus (IF %d BUS %d cs_num %d curr_cs_num %d)\n",
					   if_id, bus_cnt, cs_count,
					   curr_cs_num));
			return MV_NOT_SUPPORTED;
		}
	}
	*cs_num = curr_cs_num;

	return MV_OK;
}
/*
 * Init Controller Flow
 */
int hws_ddr3_tip_init_controller(u32 dev_num, struct init_cntr_param *init_cntr_prm)
{
	u32 if_id;
	u32 cs_num;
	u32 t_refi = 0, t_hclk = 0, t_ckclk = 0, t_faw = 0, t_pd = 0,
		t_wr = 0, t2t = 0, txpdll = 0;
	u32 data_value = 0, bus_width = 0, page_size = 0, cs_cnt = 0,
		mem_mask = 0, bus_index = 0;
	enum hws_speed_bin speed_bin_index = SPEED_BIN_DDR_2133N;
	enum hws_mem_size memory_size = MEM_2G;
	enum hws_ddr_freq freq = init_freq;
	enum hws_timing timing;
	u32 cs_mask = 0;
	u32 cl_value = 0, cwl_val = 0;
	u32 refresh_interval_cnt = 0, bus_cnt = 0, adll_tap = 0;
	enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
	u32 data_read[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();
	u32 odt_config = g_odt_config_2cs;

	DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
			  ("Init_controller, do_mrs_phy=%d, is_ctrl64_bit=%d\n",
			   init_cntr_prm->do_mrs_phy,
			   init_cntr_prm->is_ctrl64_bit));

	if (init_cntr_prm->init_phy == 1) {
		CHECK_STATUS(ddr3_tip_configure_phy(dev_num));
	}

	if (generic_init_controller == 1) {
		for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("active IF %d\n", if_id));
			mem_mask = 0;
			for (bus_index = 0;
			     bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
			     bus_index++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
				mem_mask |=
					tm->interface_params[if_id].
					as_bus_params[bus_index].mirror_enable_bitmask;
			}
			if (mem_mask != 0) {
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, ACCESS_TYPE_MULTICAST,
					      if_id, CS_ENABLE_REG, 0,
					      0x8));
			}
			memory_size =
				tm->interface_params[if_id].memory_size;
			speed_bin_index =
				tm->interface_params[if_id].speed_bin_index;
			freq = init_freq;
			t_refi =
				(tm->interface_params[if_id].interface_temp ==
				 HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW;
			t_refi *= 1000;	/* psec */
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("mem_size %d speed_bin_ind %d freq %d t_refi %d\n",
					   memory_size, speed_bin_index, freq,
					   t_refi));
			/* HCLK & CK CLK in 2:1 ratio [ps] */
			/* t_ckclk is the external clock */
			t_ckclk = (MEGA / freq_val[freq]);
			/* t_hclk is the internal clock */
			t_hclk = 2 * t_ckclk;
			refresh_interval_cnt = t_refi / t_hclk;	/* no units */
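			/*
			 * Editor's worked example (assuming
			 * freq_val[DDR_FREQ_667] = 667 and
			 * TREFI_LOW = 7800 ns, as the code above implies):
			 * at 667 MHz, t_ckclk = 10^6 / 667 ~= 1499 ps and
			 * t_hclk = 2998 ps, so refresh_interval_cnt =
			 * 7,800,000 / 2998 ~= 2601 HCLK cycles.
			 */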
			bus_width =
				(DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)
				 == 1) ? (16) : (32);
			if (init_cntr_prm->is_ctrl64_bit)
				bus_width = 64;
			data_value =
				(refresh_interval_cnt | 0x4000 |
				 ((bus_width == 32) ? 0x8000 : 0) |
				 0x1000000) & ~(1 << 26);
			/* Interface Bus Width */
			/* SRMode */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_CONFIGURATION_REG, data_value,
				      0x100ffff));
			/* Interleave first command pre-charge enable (TBD) */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_OPEN_PAGE_CONTROL_REG, (1 << 10),
				      (1 << 10)));
			/* PHY configuration */
			/*
			 * Postamble Length = 1.5cc, Addresscntl to clk skew
			 * ½, Preamble length normal, parallel ADLL enable
			 */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DRAM_PHY_CONFIGURATION, 0x28, 0x3e));
			if (init_cntr_prm->is_ctrl64_bit) {
				/* positive edge */
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      DRAM_PHY_CONFIGURATION, 0x0,
					      0xff80));
			}
			/* calibration block disable */
			/* Xbar Read buffer select (for Internal access) */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG, 0x1200c,
				      0x7dffe01c));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG,
				      calibration_update_control << 3, 0x3 << 3));
			/* Pad calibration control - enable */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      CALIB_MACHINE_CTRL_REG, 0x1, 0x1));

			cs_mask = 0;
			data_value = 0x7;
			/*
			 * Address ctrl - part of the generic code.
			 * The following is configured:
			 * 1) Memory Size
			 * 2) Bus_width
			 * 3) CS#
			 * 4) Page Number
			 * 5) t_faw
			 * Per D-unit, get the Bus_width parameter from the
			 * topology map; t_faw is per D-unit, not per CS.
			 */
			page_size =
				(tm->interface_params[if_id].bus_width ==
				 BUS_WIDTH_8) ? page_param[memory_size].page_size_8bit
				: page_param[memory_size].page_size_16bit;
			t_faw =
				(page_size == 1) ? speed_bin_table(speed_bin_index,
								   SPEED_BIN_TFAW1K)
				: speed_bin_table(speed_bin_index,
						  SPEED_BIN_TFAW2K);
			data_value = TIME_2_CLOCK_CYCLES(t_faw, t_ckclk);
			data_value = data_value << 24;
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      SDRAM_ACCESS_CONTROL_REG, data_value,
				      0x7f000000));
			data_value =
				(tm->interface_params[if_id].bus_width ==
				 BUS_WIDTH_8) ? 0 : 1;
			/* create merged CS mask for all CS available in dunit */
			for (bus_cnt = 0;
			     bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
			     bus_cnt++) {
				VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
				cs_mask |=
					tm->interface_params[if_id].
					as_bus_params[bus_cnt].cs_bitmask;
			}
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("Init_controller IF %d cs_mask %d\n",
					   if_id, cs_mask));
			/*
			 * Configure the following according to the topology
			 * map: if the D-unit has CS0 only, configure CS0;
			 * if it is multi-CS, configure them both. The bus
			 * width is the memory bus width - x8 or x16.
			 */
			for (cs_cnt = 0; cs_cnt < NUM_OF_CS; cs_cnt++) {
				ddr3_tip_configure_cs(dev_num, if_id, cs_cnt,
						      ((cs_mask & (1 << cs_cnt)) ? 1
						       : 0));
			}

			if (init_cntr_prm->do_mrs_phy) {
				/*
				 * MR0 - part of the generic code.
				 * The following is configured:
				 * 1) Burst Length
				 * 2) CAS Latency
				 * Get each D-unit's speed bin and target
				 * frequency; from these two parameters look
				 * up the appropriate CAS latency in the CL
				 * table.
				 */
				cl_value =
					tm->interface_params[if_id].cas_l;
				cwl_val =
					tm->interface_params[if_id].cas_wl;
				DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
						  ("cl_value 0x%x cwl_val 0x%x\n",
						   cl_value, cwl_val));

				t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
									   SPEED_BIN_TWR),
							   t_ckclk);
				data_value =
					((cl_mask_table[cl_value] & 0x1) << 2) |
					((cl_mask_table[cl_value] & 0xe) << 3);
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      MR0_REG, data_value,
					      (0x7 << 4) | (1 << 2)));
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      MR0_REG, twr_mask_table[t_wr + 1] << 9,
					      (0x7 << 9)));
				/*
				 * MR1: set RTT and DIC design GL values
				 * configured by the user
				 */
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, ACCESS_TYPE_MULTICAST,
					      PARAM_NOT_CARE, MR1_REG,
					      g_dic | g_rtt_nom, 0x266));
				/* MR2 - part of the generic code */
				/*
				 * The following is configured:
				 * 1) SRT
				 * 2) CAS Write Latency
				 */
				data_value = (cwl_mask_table[cwl_val] << 3);
				data_value |=
					((tm->interface_params[if_id].
					  interface_temp ==
					  HWS_TEMP_HIGH) ? (1 << 7) : 0);
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      MR2_REG, data_value,
					      (0x7 << 3) | (0x1 << 7) | (0x3 << 9)));
			}
			ddr3_tip_write_odt(dev_num, access_type, if_id,
					   cl_value, cwl_val);
			ddr3_tip_set_timing(dev_num, access_type, if_id, freq);

			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DUNIT_CONTROL_HIGH_REG, 0x177,
				      0x1000177));
			if (init_cntr_prm->is_ctrl64_bit) {
				/* disable 0.25 cc delay */
				CHECK_STATUS(ddr3_tip_if_write
					     (dev_num, access_type, if_id,
					      DUNIT_CONTROL_HIGH_REG, 0x0,
					      0x800));
			}

			/* reset bit 7 */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DUNIT_CONTROL_HIGH_REG,
				      (init_cntr_prm->msys_init << 7), (1 << 7)));

			/* calculate number of CS (per interface) */
			CHECK_STATUS(calc_cs_num(dev_num, if_id, &cs_num));
			timing = tm->interface_params[if_id].timing;

			if (mode2_t != 0xff) {
				t2t = mode2_t;
			} else if (timing != HWS_TIM_DEFAULT) {
				/* Board topology map is forcing timing */
				t2t = (timing == HWS_TIM_2T) ? 1 : 0;
			} else {
				t2t = (cs_num == 1) ? 0 : 1;
			}

			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DDR_CONTROL_LOW_REG, t2t << 3,
				      0x3 << 3));
			/* move the block to ddr3_tip_set_timing - start */
			t_pd = TIMES_9_TREFI_CYCLES;
			txpdll = GET_MAX_VALUE(t_ckclk * 10,
					       speed_bin_table(speed_bin_index,
							       SPEED_BIN_TXPDLL));
			txpdll = CEIL_DIVIDE((txpdll - 1), t_ckclk);
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DDR_TIMING_REG, txpdll << 4 | t_pd,
				      0x1f << 4 | 0xf));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DDR_TIMING_REG, 0x28 << 9, 0x3f << 9));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DDR_TIMING_REG, 0xa << 21, 0xff << 21));
			/* move the block to ddr3_tip_set_timing - end */

			/* AUTO_ZQC_TIMING */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      TIMING_REG, (AUTO_ZQC_TIMING | (2 << 20)),
				      0x3fffff));
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, access_type, if_id,
				      DRAM_PHY_CONFIGURATION, data_read, 0x30));
			data_value =
				(data_read[if_id] == 0) ? (1 << 11) : 0;
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DUNIT_CONTROL_HIGH_REG, data_value,
				      (1 << 11)));

			/* Set Active control for ODT write transactions */
			if (cs_num == 1)
				odt_config = g_odt_config_1cs;
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_MULTICAST,
				      PARAM_NOT_CARE, 0x1494, odt_config,
				      MASK_ALL_BITS));
		}
	} else {
#ifdef STATIC_ALGO_SUPPORT
		CHECK_STATUS(ddr3_tip_static_init_controller(dev_num));
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
		CHECK_STATUS(ddr3_tip_static_phy_init_controller(dev_num));
#endif
#endif /* STATIC_ALGO_SUPPORT */
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		CHECK_STATUS(ddr3_tip_rank_control(dev_num, if_id));

		if (init_cntr_prm->do_mrs_phy) {
			CHECK_STATUS(ddr3_tip_pad_inv(dev_num, if_id));
		}

		/* Pad calibration control - disable */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      CALIB_MACHINE_CTRL_REG, 0x0, 0x1));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      CALIB_MACHINE_CTRL_REG,
			      calibration_update_control << 3, 0x3 << 3));
	}

	CHECK_STATUS(ddr3_tip_enable_init_sequence(dev_num));

	if (delay_enable != 0) {
		adll_tap = MEGA / (freq_val[freq] * 64);
		ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap);
	}

	return MV_OK;
}
/*
 * Load Topology map
 */
int hws_ddr3_tip_load_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
	enum hws_speed_bin speed_bin_index;
	enum hws_ddr_freq freq = DDR_FREQ_LIMIT;
	u32 if_id;

	freq_val[DDR_FREQ_LOW_FREQ] = dfs_low_freq;
	tm = ddr3_get_topology_map();
	CHECK_STATUS(ddr3_tip_get_first_active_if
		     ((u8)dev_num, tm->if_act_mask,
		      &first_active_if));
	DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
			  ("board IF_Mask=0x%x num_of_bus_per_interface=0x%x\n",
			   tm->if_act_mask,
			   tm->num_of_bus_per_interface));

	/*
	 * If the CL and CWL values are missing in the topology map, fill
	 * them in from the speed-bin tables
	 */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		speed_bin_index =
			tm->interface_params[if_id].speed_bin_index;
		/* TBD: only the memory frequency of interface 0 is used! */
		freq = tm->interface_params[first_active_if].memory_freq;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
				  ("speed_bin_index=%d freq=%d cl=%d cwl=%d\n",
				   speed_bin_index, freq_val[freq],
				   tm->interface_params[if_id].cas_l,
				   tm->interface_params[if_id].cas_wl));

		if (tm->interface_params[if_id].cas_l == 0) {
			tm->interface_params[if_id].cas_l =
				cas_latency_table[speed_bin_index].cl_val[freq];
		}
		if (tm->interface_params[if_id].cas_wl == 0) {
			tm->interface_params[if_id].cas_wl =
				cas_write_latency_table[speed_bin_index].cl_val[freq];
		}
	}

	return MV_OK;
}
/*
 * RANK Control Flow
 */
static int ddr3_tip_rank_control(u32 dev_num, u32 if_id)
{
	u32 data_value = 0, bus_cnt;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (bus_cnt = 1; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
		if ((tm->interface_params[if_id].
		     as_bus_params[0].cs_bitmask !=
		     tm->interface_params[if_id].
		     as_bus_params[bus_cnt].cs_bitmask) ||
		    (tm->interface_params[if_id].
		     as_bus_params[0].mirror_enable_bitmask !=
		     tm->interface_params[if_id].
		     as_bus_params[bus_cnt].mirror_enable_bitmask))
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("WARNING: wrong configuration for pup #%d; CS mask and CS mirroring should be the same for all pups\n",
					   bus_cnt));
	}

	data_value |= tm->interface_params[if_id].
		as_bus_params[0].cs_bitmask;
	data_value |= tm->interface_params[if_id].
		as_bus_params[0].mirror_enable_bitmask << 4;
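	/*
	 * Editor's note (inferred from the composition above): the register
	 * gets pup #0's CS bitmask in bits [3:0] and its CS-mirroring
	 * bitmask in bits [7:4], which is why the loop above warns when the
	 * per-pup masks disagree.
	 */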
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, RANK_CTRL_REG,
		      data_value, 0xff));

	return MV_OK;
}

/*
 * PAD Inverse Flow
 */
static int ddr3_tip_pad_inv(u32 dev_num, u32 if_id)
{
	u32 bus_cnt, data_value, ck_swap_pup_ctrl;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
		if (tm->interface_params[if_id].
		    as_bus_params[bus_cnt].is_dqs_swap == 1) {
			/* DQS swap */
			ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST,
						       if_id, bus_cnt,
						       DDR_PHY_DATA,
						       PHY_CONTROL_PHY_REG, 0xc0,
						       0xc0);
		}
		if (tm->interface_params[if_id].
		    as_bus_params[bus_cnt].is_ck_swap == 1) {
			if (bus_cnt <= 1)
				data_value = 0x5 << 2;
			else
				data_value = 0xa << 2;
			/* mask equals data */
			/* CK swap is controlled through pup #0 only! */
			ck_swap_pup_ctrl = 0;
			ddr3_tip_bus_read_modify_write(dev_num, ACCESS_TYPE_UNICAST,
						       if_id, ck_swap_pup_ctrl,
						       DDR_PHY_CONTROL,
						       PHY_CONTROL_PHY_REG,
						       data_value, data_value);
		}
	}

	return MV_OK;
}
/*
 * Run Training Flow
 */
int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type)
{
	int ret = MV_OK;

#ifdef ODT_TEST_SUPPORT
	if (finger_test == 1)
		return odt_test(dev_num, algo_type);
#endif

	if (algo_type == ALGO_TYPE_DYNAMIC) {
		ret = ddr3_tip_ddr3_auto_tune(dev_num);
	} else {
#ifdef STATIC_ALGO_SUPPORT
		{
			enum hws_ddr_freq freq;
			freq = init_freq;

			/* add to mask */
			if (is_adll_calib_before_init != 0) {
				printf("with adll calib before init\n");
				adll_calibration(dev_num, ACCESS_TYPE_MULTICAST,
						 0, freq);
			}
			/*
			 * Frequency per interface is not relevant,
			 * only interface 0
			 */
			ret = ddr3_tip_run_static_alg(dev_num, freq);
		}
#endif
	}

	if (ret != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Run_alg: tuning failed %d\n", ret));
	}

	return ret;
}

#ifdef ODT_TEST_SUPPORT
/*
 * ODT Test
 */
static int odt_test(u32 dev_num, enum hws_algo_type algo_type)
{
	int ret = MV_OK, ret_tune = MV_OK;
	int pfinger_val = 0, nfinger_val;

	for (pfinger_val = p_finger_start; pfinger_val <= p_finger_end;
	     pfinger_val += p_finger_step) {
		for (nfinger_val = n_finger_start; nfinger_val <= n_finger_end;
		     nfinger_val += n_finger_step) {
			if (finger_test != 0) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
						  ("pfinger_val %d nfinger_val %d\n",
						   pfinger_val, nfinger_val));
				p_finger = pfinger_val;
				n_finger = nfinger_val;
			}
			if (algo_type == ALGO_TYPE_DYNAMIC) {
				ret = ddr3_tip_ddr3_auto_tune(dev_num);
			} else {
				/*
				 * Frequency per interface is not relevant,
				 * only interface 0
				 */
				ret = ddr3_tip_run_static_alg(dev_num,
							      init_freq);
			}
		}
	}

	if (ret_tune != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Run_alg: tuning failed %d\n", ret_tune));
		ret = (ret == MV_OK) ? ret_tune : ret;
	}

	return ret;
}
#endif
/*
 * Select Controller
 */
int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable)
{
	if (config_func_info[dev_num].tip_dunit_mux_select_func != NULL) {
		return config_func_info[dev_num].
			tip_dunit_mux_select_func((u8)dev_num, enable);
	}

	return MV_FAIL;
}

/*
 * Dunit Register Write
 */
int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access,
		      u32 if_id, u32 reg_addr, u32 data_value, u32 mask)
{
	if (config_func_info[dev_num].tip_dunit_write_func != NULL) {
		return config_func_info[dev_num].
			tip_dunit_write_func((u8)dev_num, interface_access,
					     if_id, reg_addr,
					     data_value, mask);
	}

	return MV_FAIL;
}

/*
 * Dunit Register Read
 */
int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access,
		     u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
	if (config_func_info[dev_num].tip_dunit_read_func != NULL) {
		return config_func_info[dev_num].
			tip_dunit_read_func((u8)dev_num, interface_access,
					    if_id, reg_addr,
					    data, mask);
	}

	return MV_FAIL;
}

/*
 * Dunit Register Polling
 */
int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type,
			u32 if_id, u32 exp_value, u32 mask, u32 offset,
			u32 poll_tries)
{
	u32 poll_cnt = 0, interface_num = 0, start_if, end_if;
	u32 read_data[MAX_INTERFACE_NUM];
	int ret;
	int is_fail = 0, is_if_fail;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (access_type == ACCESS_TYPE_MULTICAST) {
		start_if = 0;
		end_if = MAX_INTERFACE_NUM - 1;
	} else {
		start_if = if_id;
		end_if = if_id;
	}

	for (interface_num = start_if; interface_num <= end_if;
	     interface_num++) {
		/* polling bit 3 for n times */
		VALIDATE_ACTIVE(tm->if_act_mask, interface_num);
		is_if_fail = 0;
		for (poll_cnt = 0; poll_cnt < poll_tries; poll_cnt++) {
			ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST,
					       interface_num, offset,
					       read_data, mask);
			if (ret != MV_OK)
				return ret;
			if (read_data[interface_num] == exp_value)
				break;
		}
		if (poll_cnt >= poll_tries) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("max poll IF #%d\n", interface_num));
			is_fail = 1;
			is_if_fail = 1;
		}
		training_result[training_stage][interface_num] =
			(is_if_fail == 1) ? TEST_FAILED : TEST_SUCCESS;
	}

	return (is_fail == 0) ? MV_OK : MV_FAIL;
}
/*
 * Bus read access
 */
int ddr3_tip_bus_read(u32 dev_num, u32 if_id,
		      enum hws_access_type phy_access, u32 phy_id,
		      enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data)
{
	u32 bus_index = 0;
	u32 data_read[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			CHECK_STATUS(ddr3_tip_bus_access
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, ACCESS_TYPE_UNICAST,
				      bus_index, phy_type, reg_addr, 0,
				      OPERATION_READ));
			CHECK_STATUS(ddr3_tip_if_read
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      PHY_REG_FILE_ACCESS, data_read,
				      MASK_ALL_BITS));
			data[bus_index] = (data_read[if_id] & 0xffff);
		}
	} else {
		CHECK_STATUS(ddr3_tip_bus_access
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      phy_access, phy_id, phy_type, reg_addr, 0,
			      OPERATION_READ));
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      PHY_REG_FILE_ACCESS, data_read, MASK_ALL_BITS));
		/*
		 * Only the 16 LSBs are valid in the PHY (each register is
		 * different; some are actually narrower than 16 bits)
		 */
		*data = (data_read[if_id] & 0xffff);
	}

	return MV_OK;
}

/*
 * Bus write access
 */
int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type interface_access,
		       u32 if_id, enum hws_access_type phy_access,
		       u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr,
		       u32 data_value)
{
	CHECK_STATUS(ddr3_tip_bus_access
		     (dev_num, interface_access, if_id, phy_access,
		      phy_id, phy_type, reg_addr, data_value, OPERATION_WRITE));

	return MV_OK;
}

/*
 * Bus access routine (relevant for both read & write)
 */
static int ddr3_tip_bus_access(u32 dev_num, enum hws_access_type interface_access,
			       u32 if_id, enum hws_access_type phy_access,
			       u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr,
			       u32 data_value, enum hws_operation oper_type)
{
	u32 addr_low = 0x3f & reg_addr;
	u32 addr_hi = ((0xc0 & reg_addr) >> 6);
	u32 data_p1 =
		(oper_type << 30) + (addr_hi << 28) + (phy_access << 27) +
		(phy_type << 26) + (phy_id << 22) + (addr_low << 16) +
		(data_value & 0xffff);
	u32 data_p2 = data_p1 + (1 << 31);
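	/*
	 * Editor's illustration of the word packed above (field positions
	 * read straight off the shifts; the names are descriptive only):
	 *	[31]	execute strobe (set only in data_p2)
	 *	[30]	operation (read/write)
	 *	[29:28]	register address, high 2 bits
	 *	[27]	phy access type
	 *	[26]	phy type (data/control)
	 *	[25:22]	phy id
	 *	[21:16]	register address, low 6 bits
	 *	[15:0]	write data
	 */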
	u32 start_if, end_if;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS,
		      data_p1, MASK_ALL_BITS));
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, interface_access, if_id, PHY_REG_FILE_ACCESS,
		      data_p2, MASK_ALL_BITS));

	if (interface_access == ACCESS_TYPE_UNICAST) {
		start_if = if_id;
		end_if = if_id;
	} else {
		start_if = 0;
		end_if = MAX_INTERFACE_NUM - 1;
	}

	/* polling for read/write execution done */
	for (if_id = start_if; if_id <= end_if; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		CHECK_STATUS(is_bus_access_done
			     (dev_num, if_id, PHY_REG_FILE_ACCESS, 31));
	}

	return MV_OK;
}

/*
 * Check bus access done
 */
static int is_bus_access_done(u32 dev_num, u32 if_id, u32 dunit_reg_adrr,
			      u32 bit)
{
	u32 rd_data = 1;
	u32 cnt = 0;
	u32 data_read[MAX_INTERFACE_NUM];

	CHECK_STATUS(ddr3_tip_if_read
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, dunit_reg_adrr,
		      data_read, MASK_ALL_BITS));
	rd_data = data_read[if_id];
	rd_data &= (1 << bit);

	while (rd_data != 0) {
		if (cnt++ >= MAX_POLLING_ITERATIONS)
			break;
		CHECK_STATUS(ddr3_tip_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      dunit_reg_adrr, data_read, MASK_ALL_BITS));
		rd_data = data_read[if_id];
		rd_data &= (1 << bit);
	}

	if (cnt < MAX_POLLING_ITERATIONS)
		return MV_OK;
	else
		return MV_FAIL;
}

/*
 * Phy read-modify-write
 */
int ddr3_tip_bus_read_modify_write(u32 dev_num, enum hws_access_type access_type,
				   u32 interface_id, u32 phy_id,
				   enum hws_ddr_phy phy_type, u32 reg_addr,
				   u32 data_value, u32 reg_mask)
{
	u32 data_val = 0, if_id, start_if, end_if;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (access_type == ACCESS_TYPE_MULTICAST) {
		start_if = 0;
		end_if = MAX_INTERFACE_NUM - 1;
	} else {
		start_if = interface_id;
		end_if = interface_id;
	}

	for (if_id = start_if; if_id <= end_if; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		CHECK_STATUS(ddr3_tip_bus_read
			     (dev_num, if_id, ACCESS_TYPE_UNICAST, phy_id,
			      phy_type, reg_addr, &data_val));
		data_value = (data_val & (~reg_mask)) | (data_value & reg_mask);
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      ACCESS_TYPE_UNICAST, phy_id, phy_type, reg_addr,
			      data_value));
	}

	return MV_OK;
}
/*
 * ADLL Calibration
 */
int adll_calibration(u32 dev_num, enum hws_access_type access_type,
		     u32 if_id, enum hws_ddr_freq frequency)
{
	struct hws_tip_freq_config_info freq_config_info;
	u32 bus_cnt = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* Reset Diver_b assert -> de-assert */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG,
		      0, 0x10000000));
	mdelay(10);
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG,
		      0x10000000, 0x10000000));

	if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) {
		CHECK_STATUS(config_func_info[dev_num].
			     tip_get_freq_config_info_func((u8)dev_num, frequency,
							   &freq_config_info));
	} else {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("tip_get_freq_config_info_func is NULL"));
		return MV_NOT_INITIALIZED;
	}

	for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES(); bus_cnt++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, access_type, if_id, bus_cnt,
			      DDR_PHY_DATA, BW_PHY_REG,
			      freq_config_info.bw_per_freq << 8, 0x700));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, access_type, if_id, bus_cnt,
			      DDR_PHY_DATA, RATE_PHY_REG,
			      freq_config_info.rate_per_freq, 0x7));
	}

	/* DUnit to Phy drive post edge, ADLL reset assert -> de-assert */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION,
		      0, (0x80000000 | 0x40000000)));
	mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ]));
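	/*
	 * Editor's note: the delay above scales inversely with the frequency
	 * ratio, using integer division; e.g. with freq_val[frequency] = 667
	 * and freq_val[DDR_FREQ_LOW_FREQ] = dfs_low_freq = 130, the ratio is
	 * 5, so mdelay(100 / 5) waits 20 ms.
	 */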
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, DRAM_PHY_CONFIGURATION,
		      (0x80000000 | 0x40000000), (0x80000000 | 0x40000000)));

	/* polling for ADLL Done */
	if (ddr3_tip_if_polling(dev_num, access_type, if_id,
				0x3ff03ff, 0x3ff03ff, PHY_LOCK_STATUS_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Freq_set: DDR3 poll failed(1)"));
	}

	/* pup data_pup reset assert -> de-assert */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG,
		      0, 0x60000000));
	mdelay(10);
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, access_type, if_id, SDRAM_CONFIGURATION_REG,
		      0x60000000, 0x60000000));

	return MV_OK;
}
int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type access_type,
		      u32 if_id, enum hws_ddr_freq frequency)
{
	u32 cl_value = 0, cwl_value = 0, mem_mask = 0, val = 0,
		bus_cnt = 0, t_hclk = 0, t_wr = 0,
		refresh_interval_cnt = 0, cnt_id;
	u32 t_ckclk;
	u32 t_refi = 0, end_if, start_if;
	u32 bus_index = 0;
	int is_dll_off = 0;
	enum hws_speed_bin speed_bin_index = 0;
	struct hws_tip_freq_config_info freq_config_info;
	enum hws_result *flow_result = training_result[training_stage];
	u32 adll_tap = 0;
	u32 cs_mask[MAX_INTERFACE_NUM];
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
			  ("dev %d access %d IF %d freq %d\n", dev_num,
			   access_type, if_id, frequency));

	if (frequency == DDR_FREQ_LOW_FREQ)
		is_dll_off = 1;
	if (access_type == ACCESS_TYPE_MULTICAST) {
		start_if = 0;
		end_if = MAX_INTERFACE_NUM - 1;
	} else {
		start_if = if_id;
		end_if = if_id;
	}

	/* calculate interface cs mask - Oferb 4/11 */
	/* speed bin can be different for each interface */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* cs enable is active low */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		cs_mask[if_id] = CS_BIT_MASK;
		training_result[training_stage][if_id] = TEST_SUCCESS;
		ddr3_tip_calc_cs_mask(dev_num, if_id, effective_cs,
				      &cs_mask[if_id]);
	}

	/* speed bin can be different for each interface */
	/*
	 * moti b - need to remove the loop for multicast access functions
	 * and loop the unicast access functions
	 */
	for (if_id = start_if; if_id <= end_if; if_id++) {
		if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
			continue;

		flow_result[if_id] = TEST_SUCCESS;
		speed_bin_index =
			tm->interface_params[if_id].speed_bin_index;
		if (tm->interface_params[if_id].memory_freq ==
		    frequency) {
			cl_value =
				tm->interface_params[if_id].cas_l;
			cwl_value =
				tm->interface_params[if_id].cas_wl;
		} else {
			cl_value =
				cas_latency_table[speed_bin_index].cl_val[frequency];
			cwl_value =
				cas_write_latency_table[speed_bin_index].
				cl_val[frequency];
		}

		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
				  ("Freq_set dev 0x%x access 0x%x if 0x%x freq 0x%x speed %d:\n\t",
				   dev_num, access_type, if_id,
				   frequency, speed_bin_index));
		for (cnt_id = 0; cnt_id < DDR_FREQ_LIMIT; cnt_id++) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE,
					  ("%d ",
					   cas_latency_table[speed_bin_index].
					   cl_val[cnt_id]));
		}
		DEBUG_TRAINING_IP(DEBUG_LEVEL_TRACE, ("\n"));
		mem_mask = 0;
		for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			mem_mask |=
				tm->interface_params[if_id].
				as_bus_params[bus_index].mirror_enable_bitmask;
		}
		if (mem_mask != 0) {
			/* motib redundant in KW28 */
			CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
						       if_id,
						       CS_ENABLE_REG, 0, 0x8));
		}

		/* dll state after exiting SR */
		if (is_dll_off == 1) {
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DFS_REG, 0x1, 0x1));
		} else {
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      DFS_REG, 0, 0x1));
		}

		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      DUNIT_MMASK_REG, 0, 0x1));
		/* DFS - block transactions */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      DFS_REG, 0x2, 0x2));

		/* disable ODT in case of dll off */
		if (is_dll_off == 1) {
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      0x1874, 0, 0x244));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      0x1884, 0, 0x244));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      0x1894, 0, 0x244));
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, access_type, if_id,
				      0x18a4, 0, 0x244));
		}

		/* DFS - Enter Self-Refresh */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG, 0x4,
			      0x4));
		/* polling on self refresh entry */
		if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST,
					if_id, 0x8, 0x8, DFS_REG,
					MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Freq_set: DDR3 poll failed on SR entry\n"));
		}

		/* PLL configuration */
		if (config_func_info[dev_num].tip_set_freq_divider_func != NULL) {
			config_func_info[dev_num].
				tip_set_freq_divider_func(dev_num, if_id,
							  frequency);
		}
		/* PLL configuration End */

		/* adjust t_refi to new frequency */
		t_refi = (tm->interface_params[if_id].interface_temp ==
			  HWS_TEMP_HIGH) ? TREFI_HIGH : TREFI_LOW;
		t_refi *= 1000;	/* psec */

		/* HCLK in [ps] */
		t_hclk = MEGA / (freq_val[frequency] / 2);
		refresh_interval_cnt = t_refi / t_hclk;	/* no units */
		val = 0x4000 | refresh_interval_cnt;
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      SDRAM_CONFIGURATION_REG, val, 0x7fff));

		/* DFS - CL/CWL/WR parameters after exiting SR */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG,
			      (cl_mask_table[cl_value] << 8), 0xf00));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG,
			      (cwl_mask_table[cwl_value] << 12), 0x7000));

		t_ckclk = MEGA / freq_val[frequency];
		t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
							   SPEED_BIN_TWR),
					   t_ckclk);
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG,
			      (twr_mask_table[t_wr + 1] << 16), 0x70000));
  1249. /* Restore original RTT values if returning from DLL OFF mode */
  1250. if (is_dll_off == 1) {
  1251. CHECK_STATUS(ddr3_tip_if_write
  1252. (dev_num, access_type, if_id, 0x1874,
  1253. g_dic | g_rtt_nom, 0x266));
  1254. CHECK_STATUS(ddr3_tip_if_write
  1255. (dev_num, access_type, if_id, 0x1884,
  1256. g_dic | g_rtt_nom, 0x266));
  1257. CHECK_STATUS(ddr3_tip_if_write
  1258. (dev_num, access_type, if_id, 0x1894,
  1259. g_dic | g_rtt_nom, 0x266));
  1260. CHECK_STATUS(ddr3_tip_if_write
  1261. (dev_num, access_type, if_id, 0x18a4,
  1262. g_dic | g_rtt_nom, 0x266));
  1263. }
  1264. /* Reset Diver_b assert -> de-assert */
  1265. CHECK_STATUS(ddr3_tip_if_write
  1266. (dev_num, access_type, if_id,
  1267. SDRAM_CONFIGURATION_REG, 0, 0x10000000));
  1268. mdelay(10);
  1269. CHECK_STATUS(ddr3_tip_if_write
  1270. (dev_num, access_type, if_id,
  1271. SDRAM_CONFIGURATION_REG, 0x10000000, 0x10000000));
  1272. /* Adll configuration function of process and Frequency */
  1273. if (config_func_info[dev_num].tip_get_freq_config_info_func != NULL) {
  1274. CHECK_STATUS(config_func_info[dev_num].
  1275. tip_get_freq_config_info_func(dev_num, frequency,
  1276. &freq_config_info));
  1277. }
  1278. /* TBD check milo5 using device ID ? */
  1279. for (bus_cnt = 0; bus_cnt < GET_TOPOLOGY_NUM_OF_BUSES();
  1280. bus_cnt++) {
  1281. VALIDATE_ACTIVE(tm->bus_act_mask, bus_cnt);
  1282. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  1283. (dev_num, ACCESS_TYPE_UNICAST,
  1284. if_id, bus_cnt, DDR_PHY_DATA,
  1285. 0x92,
  1286. freq_config_info.
  1287. bw_per_freq << 8
  1288. /*freq_mask[dev_num][frequency] << 8 */
  1289. , 0x700));
  1290. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  1291. (dev_num, ACCESS_TYPE_UNICAST, if_id,
  1292. bus_cnt, DDR_PHY_DATA, 0x94,
  1293. freq_config_info.rate_per_freq, 0x7));
  1294. }
  1295. /* DUnit to Phy drive post edge, ADLL reset assert de-assert */
  1296. CHECK_STATUS(ddr3_tip_if_write
  1297. (dev_num, access_type, if_id,
  1298. DRAM_PHY_CONFIGURATION, 0,
  1299. (0x80000000 | 0x40000000)));
		mdelay(100 / (freq_val[frequency] / freq_val[DDR_FREQ_LOW_FREQ]));
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      DRAM_PHY_CONFIGURATION, (0x80000000 | 0x40000000),
			      (0x80000000 | 0x40000000)));

		/* polling for ADLL Done */
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3ff03ff,
		     0x3ff03ff, PHY_LOCK_STATUS_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Freq_set: DDR3 poll failed(1)\n"));
		}
		/* PUP data reset: assert -> de-assert */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      SDRAM_CONFIGURATION_REG, 0, 0x60000000));
		mdelay(10);
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      SDRAM_CONFIGURATION_REG, 0x60000000, 0x60000000));

		/* Set proper timing params before exiting Self-Refresh */
		ddr3_tip_set_timing(dev_num, access_type, if_id, frequency);
		if (delay_enable != 0) {
			adll_tap = MEGA / (freq_val[frequency] * 64);
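			/*
			 * adll_tap is the ADLL tap size in ps, i.e. 1/64 of
			 * the clock period (roughly 19 ps at 800 MHz).
			 */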
			ddr3_tip_cmd_addr_init_delay(dev_num, adll_tap);
		}
		/* Exit SR */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG, 0,
			      0x4));
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x8, DFS_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Freq_set: DDR3 poll failed(2)\n"));
		}

		/* Refresh Command */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id,
			      SDRAM_OPERATION_REG, 0x2, 0xf1f));
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f,
		     SDRAM_OPERATION_REG, MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Freq_set: DDR3 poll failed(3)\n"));
		}

		/* Release DFS Block */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DFS_REG, 0,
			      0x2));
		/* Controller to MBUS Retry - normal */
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, DUNIT_MMASK_REG,
			      0x1, 0x1));
		/* MR0: Burst Length 8, CL, Auto-precharge (0x16cc) */
		val =
			((cl_mask_table[cl_value] & 0x1) << 2) |
			((cl_mask_table[cl_value] & 0xe) << 3);
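		/*
		 * DDR3 MR0 splits the CAS-latency code: bit 0 of the encoded
		 * CL lands in MR0[2] and bits 3:1 in MR0[6:4], which is what
		 * the shift-and-mask above builds.
		 */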
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, access_type, if_id, MR0_REG,
			      val, (0x7 << 4) | (1 << 2)));

		/* MR2: CWL, Auto Self-Refresh - disable */
		val = (cwl_mask_table[cwl_value] << 3);
		/*
		 * nklein 24.10.13 - should not be here - leave value as set in
		 * the init configuration: val |= (1 << 9);
		 * val |= ((tm->interface_params[if_id].
		 * interface_temp == HWS_TEMP_HIGH) ? (1 << 7) : 0);
		 */
		/* nklein 24.10.13 - see above comment */
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
					       if_id, MR2_REG,
					       val, (0x7 << 3)));
		/* ODT TIMING */
		val = ((cl_value - cwl_value + 1) << 4) |
		      ((cl_value - cwl_value + 6) << 8) |
		      ((cl_value - 1) << 12) | ((cl_value + 6) << 16);
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
					       if_id, ODT_TIMING_LOW,
					       val, 0xffff0));
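		/*
		 * The ODT assert/de-assert points are given in clock cycles
		 * relative to the read and write commands, hence the CL/CWL
		 * based arithmetic, packed one nibble per field.
		 */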
		val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12);
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
					       if_id, ODT_TIMING_HI_REG,
					       val, 0xffff));

		/* ODT Active */
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
					       if_id,
					       DUNIT_ODT_CONTROL_REG,
					       0xf, 0xf));

		/* re-write CL */
		val = ((cl_mask_table[cl_value] & 0x1) << 2) |
		      ((cl_mask_table[cl_value] & 0xe) << 3);
		CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
					       0, MR0_REG, val,
					       (0x7 << 4) | (1 << 2)));

		/* re-write CWL */
		val = (cwl_mask_table[cwl_value] << 3);
		CHECK_STATUS(ddr3_tip_write_mrs_cmd(dev_num, cs_mask, MRS2_CMD,
						    val, (0x7 << 3)));
		CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
					       0, MR2_REG, val, (0x7 << 3)));

		if (mem_mask != 0) {
			CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
						       if_id,
						       CS_ENABLE_REG,
						       1 << 3, 0x8));
		}
	}

	return MV_OK;
}

/*
 * Set ODT values
 */
static int ddr3_tip_write_odt(u32 dev_num, enum hws_access_type access_type,
			      u32 if_id, u32 cl_value, u32 cwl_value)
{
	/* ODT TIMING */
	u32 val = (cl_value - cwl_value + 6);

	val = ((cl_value - cwl_value + 1) << 4) | ((val & 0xf) << 8) |
	      (((cl_value - 1) & 0xf) << 12) |
	      (((cl_value + 6) & 0xf) << 16) | (((val & 0x10) >> 4) << 21);
	val |= (((cl_value - 1) >> 4) << 22) | (((cl_value + 6) >> 4) << 23);
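	/*
	 * Each timing field above is a 4-bit nibble; the fifth bit of the
	 * wider values (needed at high CL/CWL) is carried separately in
	 * bits 21-23.
	 */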
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       ODT_TIMING_LOW, val, 0xffff0));
	val = 0x91 | ((cwl_value - 1) << 8) | ((cwl_value + 5) << 12);
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       ODT_TIMING_HI_REG, val, 0xffff));
	if (odt_additional == 1) {
		CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type,
					       if_id,
					       SDRAM_ODT_CONTROL_HIGH_REG,
					       0xf, 0xf));
	}

	/* ODT Active */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       DUNIT_ODT_CONTROL_REG, 0xf, 0xf));

	return MV_OK;
}

/*
 * Set Timing values for training
 */
static int ddr3_tip_set_timing(u32 dev_num, enum hws_access_type access_type,
			       u32 if_id, enum hws_ddr_freq frequency)
{
	u32 t_ckclk = 0, t_ras = 0;
	u32 t_rcd = 0, t_rp = 0, t_wr = 0, t_wtr = 0, t_rrd = 0, t_rtp = 0,
	    t_rfc = 0, t_mod = 0;
	u32 val = 0, page_size = 0;
	enum hws_speed_bin speed_bin_index;
	enum hws_mem_size memory_size = MEM_2G;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	speed_bin_index = tm->interface_params[if_id].speed_bin_index;
	memory_size = tm->interface_params[if_id].memory_size;
	page_size =
		(tm->interface_params[if_id].bus_width ==
		 BUS_WIDTH_8) ? page_param[memory_size].page_size_8bit :
		page_param[memory_size].page_size_16bit;
	t_ckclk = (MEGA / freq_val[frequency]);

	t_rrd = (page_size == 1) ? speed_bin_table(speed_bin_index,
						   SPEED_BIN_TRRD1K) :
		speed_bin_table(speed_bin_index, SPEED_BIN_TRRD2K);
	t_rrd = GET_MAX_VALUE(t_ckclk * 4, t_rrd);
	t_rtp = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index,
							   SPEED_BIN_TRTP));
	t_wtr = GET_MAX_VALUE(t_ckclk * 4, speed_bin_table(speed_bin_index,
							   SPEED_BIN_TWTR));
	t_ras = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
						    SPEED_BIN_TRAS),
				    t_ckclk);
	t_rcd = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
						    SPEED_BIN_TRCD),
				    t_ckclk);
	t_rp = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
						   SPEED_BIN_TRP),
				   t_ckclk);
	t_wr = TIME_2_CLOCK_CYCLES(speed_bin_table(speed_bin_index,
						   SPEED_BIN_TWR),
				   t_ckclk);
	t_wtr = TIME_2_CLOCK_CYCLES(t_wtr, t_ckclk);
	t_rrd = TIME_2_CLOCK_CYCLES(t_rrd, t_ckclk);
	t_rtp = TIME_2_CLOCK_CYCLES(t_rtp, t_ckclk);
	t_rfc = TIME_2_CLOCK_CYCLES(rfc_table[memory_size] * 1000, t_ckclk);
	t_mod = GET_MAX_VALUE(t_ckclk * 24, 15000);
	t_mod = TIME_2_CLOCK_CYCLES(t_mod, t_ckclk);
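	/*
	 * Example of the conversions above: tRAS = 35 ns (DDR3-1600 speed
	 * bin) at t_ckclk = 1250 ps comes out as t_ras = 28 clock cycles;
	 * tRRD/tRTP/tWTR are first floored at 4 clocks worth of ps.
	 */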
	/* SDRAM Timing Low */
	val = (t_ras & 0xf) | (t_rcd << 4) | (t_rp << 8) | (t_wr << 12) |
	      (t_wtr << 16) | (((t_ras & 0x30) >> 4) << 20) | (t_rrd << 24) |
	      (t_rtp << 28);
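	/*
	 * t_ras is six bits wide, so it is split: bits 3:0 go to register
	 * bits 3:0 and bits 5:4 to bits 21:20; every other field is packed
	 * as a single nibble.
	 */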
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_LOW_REG, val, 0xff3fffff));

	/* SDRAM Timing High */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       t_rfc & 0x7f, 0x7f));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       0x180, 0x180));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       0x600, 0x600));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       0x1800, 0xf800));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       ((t_rfc & 0x380) >> 7) << 16, 0x70000));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG, 0,
				       0x380000));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       (t_mod & 0xf) << 25, 0x1e00000));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       (t_mod >> 4) << 30, 0xc0000000));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       0x16000000, 0x1e000000));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
				       SDRAM_TIMING_HIGH_REG,
				       0x40000000, 0xc0000000));

	return MV_OK;
}

/*
 * Mode Read
 */
int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info)
{
	u32 ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       MR0_REG, mode_info->reg_mr0, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       MR1_REG, mode_info->reg_mr1, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       MR2_REG, mode_info->reg_mr2, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       MR3_REG, mode_info->reg_mr3, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       READ_DATA_SAMPLE_DELAY,
			       mode_info->read_data_sample, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			       READ_DATA_READY_DELAY,
			       mode_info->read_data_ready, MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;

	return MV_OK;
}

/*
 * Get first active IF
 */
int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask,
				 u32 *interface_id)
{
	u32 if_id;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (interface_mask & (1 << if_id)) {
			*interface_id = if_id;
			break;
		}
	}

	return MV_OK;
}

/*
 * Write CS Result - copy the result trained for the current effective_cs
 * to the register offsets of the other chip selects
 */
int ddr3_tip_write_cs_result(u32 dev_num, u32 offset)
{
	u32 if_id, bus_num, cs_bitmask, data_val, cs_num;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_num = 0; bus_num < tm->num_of_bus_per_interface;
		     bus_num++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_num);
			cs_bitmask =
				tm->interface_params[if_id].
				as_bus_params[bus_num].cs_bitmask;
			if (cs_bitmask != effective_cs) {
				cs_num = GET_CS_FROM_MASK(cs_bitmask);
				ddr3_tip_bus_read(dev_num, if_id,
						  ACCESS_TYPE_UNICAST, bus_num,
						  DDR_PHY_DATA,
						  offset +
						  CS_REG_VALUE(effective_cs),
						  &data_val);
				ddr3_tip_bus_write(dev_num,
						   ACCESS_TYPE_UNICAST,
						   if_id,
						   ACCESS_TYPE_UNICAST,
						   bus_num, DDR_PHY_DATA,
						   offset +
						   CS_REG_VALUE(cs_num),
						   data_val);
			}
		}
	}

	return MV_OK;
}

/*
 * Write MRS
 */
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd,
			   u32 data, u32 mask)
{
	u32 if_id, reg;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	reg = (cmd == MRS1_CMD) ? MR1_REG : MR2_REG;
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE, reg, data, mask));
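	/*
	 * The MRS command itself goes through the SDRAM operation register:
	 * the per-interface CS mask in bits 11:8 selects the target ranks
	 * and bits 4:0 carry the command code.
	 */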
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      SDRAM_OPERATION_REG,
			      (cs_mask_arr[if_id] << 8) | cmd, 0xf1f));
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0,
					0x1f, SDRAM_OPERATION_REG,
					MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("write_mrs_cmd: Poll cmd fail"));
		}
	}

	return MV_OK;
}

/*
 * Reset XSB Read FIFO
 */
int ddr3_tip_reset_fifo_ptr(u32 dev_num)
{
	u32 if_id = 0;

	/* Configure PHY reset value to 0 in order to "clean" the FIFO */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, 0x15c8, 0, 0xff000000));
	/*
	 * Move PHY to RL mode (only in RL mode the PHY overrides FIFO values
	 * during FIFO reset)
	 */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, TRAINING_SW_2_REG,
				       0x1, 0x9));
	/* Ensure the above configuration actually reaches the PHY */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, 0x15b0,
				       0x80000000, 0x80000000));
	/* Reset read FIFO: assert */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, 0x1400, 0, 0x40000000));
	/* Reset read FIFO: de-assert */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, 0x1400,
				       0x40000000, 0x40000000));
	/* Move PHY back to functional mode */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, TRAINING_SW_2_REG,
				       0x8, 0x9));
	/* Stop training machine */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       if_id, 0x15b4, 0x10000, 0x10000));

	return MV_OK;
}

/*
 * Reset PHY registers
 */
int ddr3_tip_ddr3_reset_phy_regs(u32 dev_num)
{
	u32 if_id, phy_id, cs;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (phy_id = 0; phy_id < tm->num_of_bus_per_interface;
		     phy_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, ACCESS_TYPE_UNICAST,
				      phy_id, DDR_PHY_DATA,
				      WL_PHY_REG +
				      CS_REG_VALUE(effective_cs),
				      phy_reg0_val));
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA,
				      RL_PHY_REG + CS_REG_VALUE(effective_cs),
				      phy_reg2_val));
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA,
				      READ_CENTRALIZATION_PHY_REG +
				      CS_REG_VALUE(effective_cs), phy_reg3_val));
			CHECK_STATUS(ddr3_tip_bus_write
				     (dev_num, ACCESS_TYPE_UNICAST, if_id,
				      ACCESS_TYPE_UNICAST, phy_id, DDR_PHY_DATA,
				      WRITE_CENTRALIZATION_PHY_REG +
				      CS_REG_VALUE(effective_cs), phy_reg3_val));
		}
	}

	/* Set Receiver Calibration value */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		/* PHY register 0xdb bits[5:0] - configure to 63 */
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, CSN_IOB_VREF_REG(cs), 63));
	}

	return MV_OK;
}

/*
 * Restore Dunit registers
 */
int ddr3_tip_restore_dunit_regs(u32 dev_num)
{
	u32 index_cnt;

	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG,
				       0x1, 0x1));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE, CALIB_MACHINE_CTRL_REG,
				       calibration_update_control << 3,
				       0x3 << 3));
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST,
				       PARAM_NOT_CARE,
				       ODPG_WRITE_READ_MODE_ENABLE_REG,
				       0xffff, MASK_ALL_BITS));

	for (index_cnt = 0; index_cnt < ARRAY_SIZE(odpg_default_value);
	     index_cnt++) {
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      odpg_default_value[index_cnt].reg_addr,
			      odpg_default_value[index_cnt].reg_data,
			      odpg_default_value[index_cnt].reg_mask));
	}

	return MV_OK;
}

/*
 * Auto tune main flow
 */
static int ddr3_tip_ddr3_training_main_flow(u32 dev_num)
{
	enum hws_ddr_freq freq = init_freq;
	struct init_cntr_param init_cntr_prm;
	int ret = MV_OK;
	u32 if_id;
	u32 max_cs = hws_ddr3_tip_max_cs_get();
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifndef EXCLUDE_SWITCH_DEBUG
	if (debug_training == DEBUG_LEVEL_TRACE) {
		CHECK_STATUS(print_device_info((u8)dev_num));
	}
#endif

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		CHECK_STATUS(ddr3_tip_ddr3_reset_phy_regs(dev_num));
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	freq = init_freq;
	if (is_pll_before_init != 0) {
		for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
			VALIDATE_ACTIVE(tm->if_act_mask, if_id);
			config_func_info[dev_num].tip_set_freq_divider_func(
				(u8)dev_num, if_id, freq);
		}
	}

	if (is_adll_calib_before_init != 0) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("with adll calib before init\n"));
		adll_calibration(dev_num, ACCESS_TYPE_MULTICAST, 0, freq);
	}

	if (is_reg_dump != 0) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("Dump before init controller\n"));
		ddr3_tip_reg_dump(dev_num);
	}

	if (mask_tune_func & INIT_CONTROLLER_MASK_BIT) {
		training_stage = INIT_CONTROLLER;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("INIT_CONTROLLER_MASK_BIT\n"));
		init_cntr_prm.do_mrs_phy = 1;
		init_cntr_prm.is_ctrl64_bit = 0;
		init_cntr_prm.init_phy = 1;
		init_cntr_prm.msys_init = 0;
		ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("hws_ddr3_tip_init_controller failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

#ifdef STATIC_ALGO_SUPPORT
	if (mask_tune_func & STATIC_LEVELING_MASK_BIT) {
		training_stage = STATIC_LEVELING;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("STATIC_LEVELING_MASK_BIT\n"));
		ret = ddr3_tip_run_static_alg(dev_num, freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_run_static_alg failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}
#endif

	if (mask_tune_func & SET_LOW_FREQ_MASK_BIT) {
		training_stage = SET_LOW_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_LOW_FREQ_MASK_BIT %d\n",
				   freq_val[low_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE, low_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & LOAD_PATTERN_MASK_BIT) {
			training_stage = LOAD_PATTERN;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("LOAD_PATTERN_MASK_BIT #%d\n",
					   effective_cs));
			ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	if (mask_tune_func & SET_MEDIUM_FREQ_MASK_BIT) {
		training_stage = SET_MEDIUM_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_MEDIUM_FREQ_MASK_BIT %d\n",
				   freq_val[medium_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE, medium_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_MASK_BIT) {
		training_stage = WRITE_LEVELING;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_MASK_BIT\n"));
		if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) {
			ret = ddr3_tip_dynamic_write_leveling(dev_num);
		} else {
			/* Use old WL */
			ret = ddr3_tip_legacy_dynamic_write_leveling(dev_num);
		}
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & LOAD_PATTERN_2_MASK_BIT) {
			training_stage = LOAD_PATTERN_2;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("LOAD_PATTERN_2_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_load_all_pattern_to_mem failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	if (mask_tune_func & READ_LEVELING_MASK_BIT) {
		training_stage = READ_LEVELING;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("READ_LEVELING_MASK_BIT\n"));
		if ((rl_mid_freq_wa == 0) || (freq_val[medium_freq] == 533)) {
			ret = ddr3_tip_dynamic_read_leveling(dev_num, medium_freq);
		} else {
			/* Use old RL */
			ret = ddr3_tip_legacy_dynamic_read_leveling(dev_num);
		}
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_read_leveling failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_SUPP_MASK_BIT) {
		training_stage = WRITE_LEVELING_SUPP;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_SUPP_MASK_BIT\n"));
		ret = ddr3_tip_dynamic_write_leveling_supp(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling_supp failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & PBS_RX_MASK_BIT) {
			training_stage = PBS_RX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("PBS_RX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_pbs_rx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_pbs_rx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & PBS_TX_MASK_BIT) {
			training_stage = PBS_TX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("PBS_TX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_pbs_tx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_pbs_tx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	if (mask_tune_func & SET_TARGET_FREQ_MASK_BIT) {
		training_stage = SET_TARGET_FREQ;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("SET_TARGET_FREQ_MASK_BIT %d\n",
				   freq_val[tm->
					    interface_params[first_active_if].
					    memory_freq]));
		ret = ddr3_tip_freq_set(dev_num, ACCESS_TYPE_MULTICAST,
					PARAM_NOT_CARE,
					tm->interface_params[first_active_if].
					memory_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_freq_set failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & WRITE_LEVELING_TF_MASK_BIT) {
		training_stage = WRITE_LEVELING_TF;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("WRITE_LEVELING_TF_MASK_BIT\n"));
		ret = ddr3_tip_dynamic_write_leveling(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_write_leveling TF failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & LOAD_PATTERN_HIGH_MASK_BIT) {
		training_stage = LOAD_PATTERN_HIGH;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("LOAD_PATTERN_HIGH\n"));
		ret = ddr3_tip_load_all_pattern_to_mem(dev_num);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_load_all_pattern_to_mem failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & READ_LEVELING_TF_MASK_BIT) {
		training_stage = READ_LEVELING_TF;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
				  ("READ_LEVELING_TF_MASK_BIT\n"));
		ret = ddr3_tip_dynamic_read_leveling(dev_num, tm->
						     interface_params[first_active_if].
						     memory_freq);
		if (is_reg_dump != 0)
			ddr3_tip_reg_dump(dev_num);
		if (ret != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("ddr3_tip_dynamic_read_leveling TF failure\n"));
			if (debug_mode == 0)
				return MV_FAIL;
		}
	}

	if (mask_tune_func & DM_PBS_TX_MASK_BIT) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("DM_PBS_TX_MASK_BIT\n"));
	}

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & VREF_CALIBRATION_MASK_BIT) {
			training_stage = VREF_CALIBRATION;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("VREF\n"));
			ret = ddr3_tip_vref(dev_num);
			if (is_reg_dump != 0) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("VREF Dump\n"));
				ddr3_tip_reg_dump(dev_num);
			}
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_vref failure\n"));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & CENTRALIZATION_RX_MASK_BIT) {
			training_stage = CENTRALIZATION_RX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("CENTRALIZATION_RX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_centralization_rx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_centralization_rx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & WRITE_LEVELING_SUPP_TF_MASK_BIT) {
			training_stage = WRITE_LEVELING_SUPP_TF;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("WRITE_LEVELING_SUPP_TF_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_dynamic_write_leveling_supp(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_dynamic_write_leveling_supp TF failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	for (effective_cs = 0; effective_cs < max_cs; effective_cs++) {
		if (mask_tune_func & CENTRALIZATION_TX_MASK_BIT) {
			training_stage = CENTRALIZATION_TX;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("CENTRALIZATION_TX_MASK_BIT CS #%d\n",
					   effective_cs));
			ret = ddr3_tip_centralization_tx(dev_num);
			if (is_reg_dump != 0)
				ddr3_tip_reg_dump(dev_num);
			if (ret != MV_OK) {
				DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
						  ("ddr3_tip_centralization_tx failure CS #%d\n",
						   effective_cs));
				if (debug_mode == 0)
					return MV_FAIL;
			}
		}
	}
	/* Set to 0 after each loop to avoid an illegal value being used */
	effective_cs = 0;

	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("restore registers to default\n"));
	/* restore register values */
	CHECK_STATUS(ddr3_tip_restore_dunit_regs(dev_num));
	if (is_reg_dump != 0)
		ddr3_tip_reg_dump(dev_num);

	return MV_OK;
}

/*
 * DDR3 Dynamic training flow
 */
static int ddr3_tip_ddr3_auto_tune(u32 dev_num)
{
	u32 if_id, stage, ret;
	int is_if_fail = 0, is_auto_tune_fail = 0;

	training_stage = INIT_CONTROLLER;

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		for (stage = 0; stage < MAX_STAGE_LIMIT; stage++)
			training_result[stage][if_id] = NO_TEST_DONE;
	}

	ret = ddr3_tip_ddr3_training_main_flow(dev_num);

	/* activate XSB test */
	if (xsb_validate_type != 0) {
		run_xsb_test(dev_num, xsb_validation_base_address, 1, 1,
			     0x1024);
	}

	if (is_reg_dump != 0)
		ddr3_tip_reg_dump(dev_num);

	/* print log */
	CHECK_STATUS(ddr3_tip_print_log(dev_num, window_mem_addr));

	if (ret != MV_OK) {
		CHECK_STATUS(ddr3_tip_print_stability_log(dev_num));
	}

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		is_if_fail = 0;
		for (stage = 0; stage < MAX_STAGE_LIMIT; stage++) {
			if (training_result[stage][if_id] == TEST_FAILED)
				is_if_fail = 1;
		}
		if (is_if_fail == 1) {
			is_auto_tune_fail = 1;
			DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
					  ("Auto Tune failed for IF %d\n",
					   if_id));
		}
	}

	if ((ret == MV_FAIL) || (is_auto_tune_fail == 1))
		return MV_FAIL;
	else
		return MV_OK;
}

/*
 * Enable init sequence
 */
int ddr3_tip_enable_init_sequence(u32 dev_num)
{
	int is_fail = 0;
	u32 if_id = 0, mem_mask = 0, bus_index = 0;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* Enable init sequence */
	CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_MULTICAST, 0,
				       SDRAM_INIT_CONTROL_REG, 0x1, 0x1));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		if (ddr3_tip_if_polling
		    (dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1,
		     SDRAM_INIT_CONTROL_REG,
		     MAX_POLLING_ITERATIONS) != MV_OK) {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("polling failed IF %d\n",
					   if_id));
			is_fail = 1;
			continue;
		}

		mem_mask = 0;
		for (bus_index = 0; bus_index < GET_TOPOLOGY_NUM_OF_BUSES();
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			mem_mask |=
				tm->interface_params[if_id].
				as_bus_params[bus_index].mirror_enable_bitmask;
		}

		if (mem_mask != 0) {
			/* Disable Multi CS */
			CHECK_STATUS(ddr3_tip_if_write
				     (dev_num, ACCESS_TYPE_MULTICAST,
				      if_id, CS_ENABLE_REG, 1 << 3,
				      1 << 3));
		}
	}

	return (is_fail == 0) ? MV_OK : MV_FAIL;
}

int ddr3_tip_register_dq_table(u32 dev_num, u32 *table)
{
	dq_map_table = table;

	return MV_OK;
}

/*
 * Check if pup search is locked
 */
int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode)
{
	u32 bit_start = 0, bit_end = 0, bit_id;

	if (read_mode == RESULT_PER_BIT) {
		bit_start = 0;
		bit_end = BUS_WIDTH_IN_BITS - 1;
	} else {
		bit_start = 0;
		bit_end = 0;
	}

	for (bit_id = bit_start; bit_id <= bit_end; bit_id++) {
		if (GET_LOCK_RESULT(pup_buf[bit_id]) == 0)
			return 0;
	}

	return 1;
}

/*
 * Get minimum buffer value
 */
u8 ddr3_tip_get_buf_min(u8 *buf_ptr)
{
	u8 min_val = 0xff;
	u8 cnt = 0;

	for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) {
		if (buf_ptr[cnt] < min_val)
			min_val = buf_ptr[cnt];
	}

	return min_val;
}

/*
 * Get maximum buffer value
 */
u8 ddr3_tip_get_buf_max(u8 *buf_ptr)
{
	u8 max_val = 0;
	u8 cnt = 0;

	for (cnt = 0; cnt < BUS_WIDTH_IN_BITS; cnt++) {
		if (buf_ptr[cnt] > max_val)
			max_val = buf_ptr[cnt];
	}

	return max_val;
}

/*
 * The following functions return memory parameters:
 * bus and device width, device size
 */
u32 hws_ddr3_get_bus_width(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	return (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 1) ? 16 : 32;
}

u32 hws_ddr3_get_device_width(u32 if_id)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	return (tm->interface_params[if_id].bus_width == BUS_WIDTH_8) ? 8 : 16;
}

u32 hws_ddr3_get_device_size(u32 if_id)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (tm->interface_params[if_id].memory_size >= MEM_SIZE_LAST) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Error: Wrong device size of Cs: %d",
				   tm->interface_params[if_id].memory_size));
		return 0;
	} else {
		return 1 << tm->interface_params[if_id].memory_size;
	}
}

int hws_ddr3_calc_mem_cs_size(u32 if_id, u32 cs, u32 *cs_size)
{
	u32 cs_mem_size, dev_size;

	dev_size = hws_ddr3_get_device_size(if_id);
	if (dev_size != 0) {
		cs_mem_size = ((hws_ddr3_get_bus_width() /
				hws_ddr3_get_device_width(if_id)) * dev_size);
		/* the result is in 1/16 GByte units, to avoid floating point */
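		/*
		 * For example, assuming 4 Gbit devices (dev_size = 8) on a
		 * 32-bit bus built from 16-bit parts: cs_mem_size =
		 * (32 / 16) * 8 = 16, i.e. 1 GB per chip select.
		 */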
		if (cs_mem_size == 2) {
			*cs_size = _128M;
		} else if (cs_mem_size == 4) {
			*cs_size = _256M;
		} else if (cs_mem_size == 8) {
			*cs_size = _512M;
		} else if (cs_mem_size == 16) {
			*cs_size = _1G;
		} else if (cs_mem_size == 32) {
			*cs_size = _2G;
		} else {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
					  ("Error: Wrong Memory size of Cs: %d", cs));
			return MV_FAIL;
		}
		return MV_OK;
	} else {
		return MV_FAIL;
	}
}

int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr)
{
	u32 cs_mem_size = 0;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
#endif

	if (hws_ddr3_calc_mem_cs_size(if_id, cs, &cs_mem_size) != MV_OK)
		return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/*
	 * If the number of address pins is too small for the memory size
	 * given in the topology, the usable size is capped at
	 * DEVICE_MAX_DRAM_ADDRESS_SIZE.
	 */
	physical_mem_size =
		mv_hwsmem_size[tm->interface_params[0].memory_size];

	if (hws_ddr3_get_device_width(cs) == 16) {
		/*
		 * A 16-bit device can address twice as much, since the
		 * least significant address pin is not needed.
		 */
		max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
	}

	if (physical_mem_size > max_mem_size) {
		cs_mem_size = max_mem_size *
			      (hws_ddr3_get_bus_width() /
			       hws_ddr3_get_device_width(if_id));
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("Updated Physical Mem size is from 0x%x to %x\n",
				   physical_mem_size,
				   DEVICE_MAX_DRAM_ADDRESS_SIZE));
	}
#endif
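	/*
	 * The base address is cs * cs_size; the 0xffff0000 mask keeps it
	 * aligned to a 64 KB boundary.
	 */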
	/* calculate CS base addr */
	*cs_base_addr = ((cs_mem_size) * cs) & 0xffff0000;

	return MV_OK;
}