smc.c

/*
 * Copyright (C) 2013, Intel Corporation
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 *
 * Ported from Intel released Quark UEFI BIOS
 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
 *
 * SPDX-License-Identifier: Intel
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/device.h>
#include <asm/arch/mrc.h>
#include <asm/arch/msg_port.h>
#include "mrc_util.h"
#include "hte.h"
#include "smc.h"

/* t_ck clock period in picoseconds per speed index 800, 1066, 1333 */
static const uint32_t t_ck[3] = {
	2500,
	1875,
	1500
};
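
/*
 * Worked example: speed index 0 is DDR3-800, i.e. a 400 MHz DRAM
 * clock, so tCK = 2500 ps; index 1 (DDR3-1066) gives 1875 ps and
 * index 2 (DDR3-1333) gives 1500 ps. With tWR fixed at 15000 ps by
 * JEDEC, MCEIL(15000, 2500) (assuming MCEIL() from mrc_util.h is a
 * round-up division helper) yields the 6 write-recovery clocks
 * programmed below for DDR3-800.
 */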

/* Global variables */
static const uint16_t ddr_wclk[] = {193, 158};

#ifdef BACKUP_WCTL
static const uint16_t ddr_wctl[] = {1, 217};
#endif

#ifdef BACKUP_WCMD
static const uint16_t ddr_wcmd[] = {1, 220};
#endif

#ifdef BACKUP_RCVN
static const uint16_t ddr_rcvn[] = {129, 498};
#endif

#ifdef BACKUP_WDQS
static const uint16_t ddr_wdqs[] = {65, 289};
#endif

#ifdef BACKUP_RDQS
static const uint8_t ddr_rdqs[] = {32, 24};
#endif

#ifdef BACKUP_WDQ
static const uint16_t ddr_wdq[] = {32, 257};
#endif
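
/*
 * The BACKUP_* tables above hold pre-computed training results, one
 * entry per board (indexed by PLATFORM_ID below). When a BACKUP_*
 * option is defined, the corresponding calibration step is skipped
 * and these hard-coded delays are programmed instead; presumably the
 * values were captured from a known-good training run on each board.
 */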

/* Stop self refresh driven by MCU */
void clear_self_refresh(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* clear the PMSTS Channel Self Refresh bits */
	mrc_write_mask(MEM_CTLR, PMSTS, PMSTS_DISR, PMSTS_DISR);

	LEAVEFN();
}

/* Initialize the timing registers (DTR0..DTR4) in the MCU */
void prog_ddr_timing_control(struct mrc_params *mrc_params)
{
	uint8_t tcl, wl;
	uint8_t trp, trcd, tras, twr, twtr, trrd, trtp, tfaw;
	uint32_t tck;
	u32 dtr0, dtr1, dtr2, dtr3, dtr4;
	u32 tmp1, tmp2;

	ENTERFN();

	/* mcu_init starts */
	mrc_post_code(0x02, 0x00);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr2 = msg_port_read(MEM_CTLR, DTR2);
	dtr3 = msg_port_read(MEM_CTLR, DTR3);
	dtr4 = msg_port_read(MEM_CTLR, DTR4);

	tck = t_ck[mrc_params->ddr_speed];	/* Clock in picoseconds */
	tcl = mrc_params->params.cl;		/* CAS latency in clocks */
	trp = tcl;	/* Per CAT MRC */
	trcd = tcl;	/* Per CAT MRC */
	tras = MCEIL(mrc_params->params.ras, tck);

	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);

	twtr = MCEIL(mrc_params->params.wtr, tck);
	trrd = MCEIL(mrc_params->params.rrd, tck);
	trtp = 4;	/* Valid for 800 and 1066, use 5 for 1333 */
	tfaw = MCEIL(mrc_params->params.faw, tck);

	wl = 5 + mrc_params->ddr_speed;
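	/*
	 * wl follows the JEDEC DDR3 CAS write latency: speed index
	 * 0/1/2 (DDR3-800/1066/1333) maps to CWL = 5/6/7 clocks.
	 */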

	dtr0 &= ~DTR0_DFREQ_MASK;
	dtr0 |= mrc_params->ddr_speed;
	dtr0 &= ~DTR0_TCL_MASK;
	tmp1 = tcl - 5;
	dtr0 |= ((tcl - 5) << 12);
	dtr0 &= ~DTR0_TRP_MASK;
	dtr0 |= ((trp - 5) << 4);	/* 5 bit DRAM Clock */
	dtr0 &= ~DTR0_TRCD_MASK;
	dtr0 |= ((trcd - 5) << 8);	/* 5 bit DRAM Clock */

	dtr1 &= ~DTR1_TWCL_MASK;
	tmp2 = wl - 3;
	dtr1 |= (wl - 3);
	dtr1 &= ~DTR1_TWTP_MASK;
	dtr1 |= ((wl + 4 + twr - 14) << 8);	/* Change to tWTP */
	dtr1 &= ~DTR1_TRTP_MASK;
	dtr1 |= ((MMAX(trtp, 4) - 3) << 28);	/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TRRD_MASK;
	dtr1 |= ((trrd - 4) << 24);	/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TCMD_MASK;
	dtr1 |= (1 << 4);
	dtr1 &= ~DTR1_TRAS_MASK;
	dtr1 |= ((tras - 14) << 20);	/* 6 bit DRAM Clock */
	dtr1 &= ~DTR1_TFAW_MASK;
	dtr1 |= ((((tfaw + 1) >> 1) - 5) << 16);	/* 4 bit DRAM Clock */
	/* Set 4 Clock CAS to CAS delay (multi-burst) */
	dtr1 &= ~DTR1_TCCD_MASK;

	dtr2 &= ~DTR2_TRRDR_MASK;
	dtr2 |= 1;
	dtr2 &= ~DTR2_TWWDR_MASK;
	dtr2 |= (2 << 8);
	dtr2 &= ~DTR2_TRWDR_MASK;
	dtr2 |= (2 << 16);

	dtr3 &= ~DTR3_TWRDR_MASK;
	dtr3 |= 2;
	dtr3 &= ~DTR3_TXXXX_MASK;
	dtr3 |= (2 << 4);

	dtr3 &= ~DTR3_TRWSR_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	} else if (mrc_params->ddr_speed == DDRFREQ_1066) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	}

	dtr3 &= ~DTR3_TWRSR_MASK;
	dtr3 |= ((4 + wl + twtr - 11) << 13);

	dtr3 &= ~DTR3_TXP_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800)
		dtr3 |= ((MMAX(0, 1 - 1)) << 22);
	else
		dtr3 |= ((MMAX(0, 2 - 1)) << 22);

	dtr4 &= ~DTR4_WRODTSTRT_MASK;
	dtr4 |= 1;
	dtr4 &= ~DTR4_WRODTSTOP_MASK;
	dtr4 |= (1 << 4);
	dtr4 &= ~DTR4_XXXX1_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 8);
	dtr4 &= ~DTR4_XXXX2_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 12);
	dtr4 &= ~(DTR4_ODTDIS | DTR4_TRGSTRDIS);

	msg_port_write(MEM_CTLR, DTR0, dtr0);
	msg_port_write(MEM_CTLR, DTR1, dtr1);
	msg_port_write(MEM_CTLR, DTR2, dtr2);
	msg_port_write(MEM_CTLR, DTR3, dtr3);
	msg_port_write(MEM_CTLR, DTR4, dtr4);

	LEAVEFN();
}
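
/*
 * Worked example for the register math above, assuming DDR3-800 with
 * CL = 6 (actual values depend on mrc_params): tck = 2500 ps, so
 * twr = MCEIL(15000, 2500) = 6. DTR0 then gets TCL field (tcl - 5) = 1
 * and TRP/TRCD fields (6 - 5) = 1, while the DTR1 tWTP field becomes
 * (wl + 4 + twr - 14) = (5 + 4 + 6 - 14) = 1 in its encoded form.
 */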

/* Configure MCU before jedec init sequence */
void prog_decode_before_jedec(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 drfc;
	u32 dcal;
	u32 dsch;
	u32 dpmc0;

	ENTERFN();

	/* Disable power saving features */
	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 |= (DPMC0_CLKGTDIS | DPMC0_DISPWRDN);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 &= ~DPMC0_DYNSREN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	/* Disable out of order transactions */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= (DSCH_OOODIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* Disable issuing the REF command */
	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	/* Disable ZQ calibration short */
	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal &= ~DCAL_SRXZQCL_MASK;
	msg_port_write(MEM_CTLR, DCAL, dcal);

	/*
	 * Training is performed in address mode 0, so rank population
	 * has limited impact; however, the simulator complains if a
	 * non-existent rank is enabled.
	 */
	drp = 0;
	if (mrc_params->rank_enables & 1)
		drp |= DRP_RKEN0;
	if (mrc_params->rank_enables & 2)
		drp |= DRP_RKEN1;
	msg_port_write(MEM_CTLR, DRP, drp);

	LEAVEFN();
}

/*
 * After Cold Reset, BIOS should set COLDWAKE bit to 1 before
 * sending the WAKE message to the Dunit.
 *
 * For Standby Exit, or any other mode in which the DRAM is in
 * SR, this bit must be set to 0.
 */
void perform_ddr_reset(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* Set COLDWAKE bit before sending the WAKE message */
	mrc_write_mask(MEM_CTLR, DRMC, DRMC_COLDWAKE, DRMC_COLDWAKE);

	/* Send wake command to DUNIT (MUST be done before JEDEC) */
	dram_wake_command();

	/* Set default value */
	msg_port_write(MEM_CTLR, DRMC,
		       mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0);

	LEAVEFN();
}

/*
 * This function performs some initialization on the DDRIO unit.
 * This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
 */
void ddrphy_init(struct mrc_params *mrc_params)
{
	uint32_t temp;
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl_grp;	/* byte lane group counter (2 BLs per module) */
	uint8_t bl_divisor = 1;	/* byte lane divisor */
	/* For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333 */
	uint8_t speed = mrc_params->ddr_speed & 3;
	uint8_t cas;
	uint8_t cwl;

	ENTERFN();

	cas = mrc_params->params.cl;
	cwl = 5 + mrc_params->ddr_speed;

	/* ddrphy_init starts */
	mrc_post_code(0x03, 0x00);

	/*
	 * HSD#231531
	 * Make sure IOBUFACT is deasserted before initializing the DDR PHY
	 *
	 * HSD#234845
	 * Make sure WRPTRENABLE is deasserted before initializing the DDR PHY
	 */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Deassert DDRPHY Initialization Complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 20), 1 << 20);	/* SPID_INIT_COMPLETE=0 */
			/* Deassert IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 2), 1 << 2);	/* IOBUFACTRST_N=0 */
			/* Disable WRPTR */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 0), 1 << 0);	/* WRPTRENABLE=0 */
		}
	}

	/* Put PHY in reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 0, 1);

	/* Initialize DQ01, DQ23, CMD, CLK-CTL, COMP modules */

	/* STEP0 */
	mrc_post_code(0x03, 0x10);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* Analog MUX select - IO2xCLKSEL */
				mrc_alt_write_mask(DDRPHY,
					DQOBSCKEBBCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 0 : (1 << 22), 1 << 22);

				/* ODT Strength */
				switch (mrc_params->rd_odt_value) {
				case 1:
					temp = 0x3;
					break;	/* 60 ohm */
				case 2:
					temp = 0x3;
					break;	/* 120 ohm */
				case 3:
					temp = 0x3;
					break;	/* 180 ohm */
				default:
					temp = 0x3;
					break;	/* 120 ohm */
				}
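
				/*
				 * Note that every case above programs the
				 * same 0x3 code, so as written rd_odt_value
				 * only changes behaviour through the
				 * DIFFAMP/ODT override selection further
				 * below, not through this strength field.
				 */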

				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);
				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);

				/* Dynamic ODT/DIFFAMP */
				temp = (cas << 24) | (cas << 16) |
					(cas << 8) | (cas << 0);
				switch (speed) {
				case 0:
					temp -= 0x01010101;
					break;	/* 800 */
				case 1:
					temp -= 0x02020202;
					break;	/* 1066 */
				case 2:
					temp -= 0x03030303;
					break;	/* 1333 */
				case 3:
					temp -= 0x04040404;
					break;	/* 1600 */
				}
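
				/*
				 * Example: with CL = 6 at DDR3-800 (speed 0)
				 * the packed word is 0x06060606 - 0x01010101
				 * = 0x05050505, i.e. each of the four launch
				 * time fields below is programmed to CL - 1.
				 */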

				/* Launch Time: ODT, DIFFAMP, ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B01LATCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x1f1f1f1f);
				switch (speed) {
				/* HSD#234715 */
				case 0:
					temp = (0x06 << 16) | (0x07 << 8);
					break;	/* 800 */
				case 1:
					temp = (0x07 << 16) | (0x08 << 8);
					break;	/* 1066 */
				case 2:
					temp = (0x09 << 16) | (0x0a << 8);
					break;	/* 1333 */
				case 3:
					temp = (0x0a << 16) | (0x0b << 8);
					break;	/* 1600 */
				}

				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B0ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);
				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B1ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);

				switch (mrc_params->rd_odt_value) {
				case 0:
					/* override DIFFAMP=on, ODT=off */
					temp = (0x3f << 16) | (0x3f << 10);
					break;
				default:
					/* override DIFFAMP=on, ODT=on */
					temp = (0x3f << 16) | (0x2a << 10);
					break;
				}

				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);
				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);

				/* DLL Setup */

				/* 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);
				mrc_alt_write_mask(DDRPHY,
					B1LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);

				/* RCVEN Bypass (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);

				/* TX */
				mrc_alt_write_mask(DDRPHY,
					DQCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 16, 1 << 16);
				mrc_alt_write_mask(DDRPHY,
					B01PTRCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 8, 1 << 8);

				/* RX (PO) */
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B0VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B1VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
			}

			/* CLKEBB */
			mrc_alt_write_mask(DDRPHY,
				CMDOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 23);
			/* Enable tristate control of cmd/address bus */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0, 0x03);
			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CMDRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 5) | (0x03 << 0), 0x3ff);

			/* CMDPM* registers must be programmed in this order */

			/* Turn On Delays: SFR (regulator), MPLL */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG4 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/*
			 * Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3,
			 * VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT
			 * for_PM_MSG_gt0, MDLL Turn On
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG3 + ch * DDRIOCCC_CH_OFFSET,
				0xfffff616, 0xffffffff);
			/* MPLL Divider Reset Delays */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG2 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn Off Delays: VREG, Staggered MDLL, MDLL, PI */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG1 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG0 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Allow PUnit signals */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				(0x6 << 8) | (0x1 << 6) | (0x4 << 0),
				0xffe00f4f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 24);	/* CLKEBB */
			/* Buffer Enable: CS,CKE,ODT,CLK */
			mrc_alt_write_mask(DDRPHY,
				CCCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x1f, 0x000ffff1);
			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CCRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0), 0x00001f1f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/*
			 * COMP (RON channel specific)
			 * - DQ/DQS/DM RON: 32 Ohm
			 * - CTRL/CMD RON: 27 Ohm
			 * - CLK RON: 26 Ohm
			 */
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CMDVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0C << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0F << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CTLVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0C << 24) | (0x03 << 16), 0x3f3f0000);

			/* DQS Swapped Input Enable */
			mrc_alt_write_mask(DDRPHY,
				COMPEN1CH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 19) | (1 << 17), 0xc00ac000);

			/* ODT VREF = 1.5 x 274/(360+274) = 0.65V (code of ~50) */
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0E << 8) | (0x05 << 0), 0x00003f3f);

			/*
			 * Slew rate settings are frequency specific,
			 * numbers below are for 800 MHz (speed == 0)
			 * - DQ/DQS/DM/CLK SR: 4V/ns,
			 * - CTRL/CMD SR: 1.5V/ns
			 */
			temp = (0x0e << 16) | (0x0e << 12) | (0x08 << 8) |
				(0x0b << 4) | (0x0b << 0);
			/* DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				DLYSELCH0 + ch * DDRCOMP_CH_OFFSET,
				temp, 0x000fffff);
			/* TCO Vref CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				TCOVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x05 << 16) | (0x05 << 8) | (0x05 << 0),
				0x003f3f3f);
			/* ODTCOMP CMD/CTL PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CCBUFODTCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0),
				0x00001f1f);
			/* COMP */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				0, 0xc0000100);

#ifdef BACKUP_COMPS
			/* DQ COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* DQS COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CLK COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CMD COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);

			/* CTL COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
#else
			/* DQ TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* DQS TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* CLK TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
#endif

			/* program STATIC delays */
#ifdef BACKUP_WCMD
			set_wcmd(ch, ddr_wcmd[PLATFORM_ID]);
#else
			set_wcmd(ch, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif

			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);
#ifdef BACKUP_WCTL
					set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);
#else
					set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif
				}
			}
		}
	}

	/* COMP (non channel specific) */
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCOCOMP: Pulse Count */
	mrc_alt_write_mask(DDRPHY, TCOCNTCTRL, 1, 3);
	/* ODT: CMD/CTL PD/PU */
	mrc_alt_write_mask(DDRPHY, CHNLBUFSTATIC,
		(0x03 << 24) | (0x03 << 16), 0x1f1f0000);
	/* Set 1us counter */
	mrc_alt_write_mask(DDRPHY, MSCNTR, 0x64, 0xff);
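	/*
	 * 0x64 is 100 decimal; presumably the counter reference here
	 * ticks at 100 MHz, so a terminal count of 100 yields the 1 us
	 * interval named in the comment above.
	 */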

	mrc_alt_write_mask(DDRPHY, LATCH1CTL, 0x1 << 28, 0x70000000);

	/* Release PHY from reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 1, 1);

	/* STEP1 */
	mrc_post_code(0x03, 0x11);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 13,
					1 << 13);	/* Enable VREG */
				delay_n(3);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);

			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);

			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
		}
	}

	/* STEP2 */
	mrc_post_code(0x03, 0x12);
	delay_n(200);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 17,
					1 << 17);	/* Enable MCDLL */
				delay_n(50);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 17, 1 << 17);	/* Enable MCDLL */
			delay_n(50);

			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);

			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);
		}
	}

	/* STEP3: */
	mrc_post_code(0x03, 0x13);
	delay_n(100);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
#ifdef FORCE_16BIT_DDRIO
				temp = (bl_grp &&
					(mrc_params->channel_width == X16)) ?
					0x11ff : 0xffff;
#else
				temp = 0xffff;
#endif
				/* Enable TXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLTXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0xffff);
				delay_n(3);

				/* Enable RXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLRXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
				delay_n(3);

				/* Enable RXDLL Overrides BL0 */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
			}

			/* ECC */
			temp = 0xffff;
			mrc_alt_write_mask(DDRPHY, ECCDLLTXCTL,
				temp, 0xffff);
			delay_n(3);

			/* CMD (PO) */
			mrc_alt_write_mask(DDRPHY,
				CMDDLLTXCTL + ch * DDRIOCCC_CH_OFFSET,
				temp, 0xffff);
			delay_n(3);
		}
	}

	/* STEP4 */
	mrc_post_code(0x03, 0x14);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Host To Memory Clock Alignment (HMC) for 800/1066 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* CLK_ALIGN_MOD_ID */
				mrc_alt_write_mask(DDRPHY,
					DQCLKALIGNREG2 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 3 : 1,
					0xf);
			}

			mrc_alt_write_mask(DDRPHY,
				ECCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x0, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x20, 0x30);
			/*
			 * NUM_SAMPLES, MAX_SAMPLES,
			 * MACRO_PI_STEP, MICRO_PI_STEP
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG1 + ch * DDRIOCCC_CH_OFFSET,
				(0x18 << 16) | (0x10 << 8) |
				(0x8 << 2) | (0x1 << 0),
				0x007f7fff);
			/* TOTAL_NUM_MODULES, FIRST_U_PARTITION */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIOCCC_CH_OFFSET,
				(0x10 << 16) | (0x4 << 8) | (0x2 << 4),
				0x001f0ff0);
#ifdef HMC_TEST
			/* START_CLK_ALIGN=1 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 24, 1 << 24);
			while (msg_port_alt_read(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET) &
				(1 << 24))
				;	/* wait for START_CLK_ALIGN=0 */
#endif

			/* Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				1, 1);	/* WRPTRENABLE=1 */

			/* COMP initial */

			/* enable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 5, 1 << 5);
			/* Initial COMP Enable */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 1, 1);
			/* wait for Initial COMP Enable = 0 */
			while (msg_port_alt_read(DDRPHY, CMPCTRL) & 1)
				;
			/* disable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				~(1 << 5), 1 << 5);

			/* IOBUFACT */

			/* STEP4a */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 2, 1 << 2);	/* IOBUFACTRST_N=1 */

			/* DDRPHY initialization complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 20, 1 << 20);	/* SPID_INIT_COMPLETE=1 */
		}
	}

	LEAVEFN();
}

/* This function performs JEDEC initialization on all enabled channels */
void perform_jedec_init(struct mrc_params *mrc_params)
{
	uint8_t twr, wl, rank;
	uint32_t tck;
	u32 dtr0;
	u32 drp;
	u32 drmc;
	u32 mrs0_cmd = 0;
	u32 emrs1_cmd = 0;
	u32 emrs2_cmd = 0;
	u32 emrs3_cmd = 0;

	ENTERFN();

	/* jedec_init starts */
	mrc_post_code(0x04, 0x00);

	/* DDR3_RESET_SET=0, DDR3_RESET_RESET=1 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 2, 0x102);

	/* Assert RESET# for 200us */
	delay_u(200);

	/* DDR3_RESET_SET=1, DDR3_RESET_RESET=0 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 0x100, 0x102);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);

	/*
	 * Set CKEVAL for populated ranks
	 * then send NOP to each rank (#4550197)
	 */
	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= 0x3;

	drmc = msg_port_read(MEM_CTLR, DRMC);
	drmc &= 0xfffffffc;
	drmc |= (DRMC_CKEMODE | drp);

	msg_port_write(MEM_CTLR, DRMC, drmc);

	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		dram_init_command(DCMD_NOP(rank));
	}

	msg_port_write(MEM_CTLR, DRMC,
		       (mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0));

	/*
	 * setup for emrs 2
	 * BIT[15:11] --> Always "0"
	 * BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
	 * BIT[08] --> Always "0"
	 * BIT[07] --> SRT: use sr_temp_range
	 * BIT[06] --> ASR: want "Manual SR Reference" (0)
	 * BIT[05:03] --> CWL: use oem_tCWL
	 * BIT[02:00] --> PASR: want "Full Array" (0)
	 */
	emrs2_cmd |= (2 << 3);
	wl = 5 + mrc_params->ddr_speed;
	emrs2_cmd |= ((wl - 5) << 9);
	emrs2_cmd |= (mrc_params->sr_temp_range << 13);
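	/*
	 * The shift amounts suggest the dram_init_command() word places
	 * mode-register address bit A[n] at command bit n+6, with the
	 * MR select at bits [5:3] and the target rank at bit 22 (see
	 * the rank loop at the end of this function); e.g. CWL lives at
	 * MR2 A[5:3], hence the << 9 above. This mapping is inferred
	 * from the encodings used here, not from a datasheet.
	 */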

	/*
	 * setup for emrs 3
	 * BIT[15:03] --> Always "0"
	 * BIT[02] --> MPR: want "Normal Operation" (0)
	 * BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
	 */
	emrs3_cmd |= (3 << 3);

	/*
	 * setup for emrs 1
	 * BIT[15:13] --> Always "0"
	 * BIT[12:12] --> Qoff: want "Output Buffer Enabled" (0)
	 * BIT[11:11] --> TDQS: want "Disabled" (0)
	 * BIT[10:10] --> Always "0"
	 * BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
	 * BIT[08] --> Always "0"
	 * BIT[07] --> WR_LVL: want "Disabled" (0)
	 * BIT[05,01] --> DIC: use ron_value
	 * BIT[04:03] --> AL: additive latency want "0" (0)
	 * BIT[00] --> DLL: want "Enable" (0)
	 *
	 * (BIT5|BIT1) set Ron value
	 * 00 --> RZQ/6 (40ohm)
	 * 01 --> RZQ/7 (34ohm)
	 * 1* --> RESERVED
	 *
	 * (BIT9|BIT6|BIT2) set Rtt_nom value
	 * 000 --> Disabled
	 * 001 --> RZQ/4 ( 60ohm)
	 * 010 --> RZQ/2 (120ohm)
	 * 011 --> RZQ/6 ( 40ohm)
	 * 1** --> RESERVED
	 */
	emrs1_cmd |= (1 << 3);
	emrs1_cmd &= ~(1 << 6);

	if (mrc_params->ron_value == 0)
		emrs1_cmd |= (1 << 7);
	else
		emrs1_cmd &= ~(1 << 7);

	if (mrc_params->rtt_nom_value == 0)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_40 << 6);
	else if (mrc_params->rtt_nom_value == 1)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_60 << 6);
	else if (mrc_params->rtt_nom_value == 2)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_120 << 6);

	/* save MRS1 value (excluding control fields) */
	mrc_params->mrs1 = emrs1_cmd >> 6;

	/*
	 * setup for mrs 0
	 * BIT[15:13] --> Always "0"
	 * BIT[12] --> PPD: for Quark (1)
	 * BIT[11:09] --> WR: use oem_tWR
	 * BIT[08] --> DLL: want "Reset" (1, self clearing)
	 * BIT[07] --> MODE: want "Normal" (0)
	 * BIT[06:04,02] --> CL: use oem_tCAS
	 * BIT[03] --> RD_BURST_TYPE: want "Interleave" (1)
	 * BIT[01:00] --> BL: want "8 Fixed" (0)
	 * WR:
	 * 0 --> 16
	 * 1 --> 5
	 * 2 --> 6
	 * 3 --> 7
	 * 4 --> 8
	 * 5 --> 10
	 * 6 --> 12
	 * 7 --> 14
	 * CL:
	 * BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
	 * BIT[06:04] use oem_tCAS-4
	 */
	mrs0_cmd |= (1 << 14);
	mrs0_cmd |= (1 << 18);
	mrs0_cmd |= ((((dtr0 >> 12) & 7) + 1) << 10);
	tck = t_ck[mrc_params->ddr_speed];
	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);
	mrs0_cmd |= ((twr - 4) << 15);
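	/*
	 * Example at DDR3-800: tck = 2500 ps so twr = 6 clocks, and the
	 * WR field gets (6 - 4) = 2, which the table above decodes back
	 * to tWR = 6. The CL field is likewise recovered from the DTR0
	 * TCL field programmed earlier ((dtr0 >> 12) & 7).
	 */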

	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		emrs2_cmd |= (rank << 22);
		dram_init_command(emrs2_cmd);

		emrs3_cmd |= (rank << 22);
		dram_init_command(emrs3_cmd);

		emrs1_cmd |= (rank << 22);
		dram_init_command(emrs1_cmd);

		mrs0_cmd |= (rank << 22);
		dram_init_command(mrs0_cmd);

		dram_init_command(DCMD_ZQCL(rank));
	}

	LEAVEFN();
}

/*
 * Dunit Initialization Complete
 *
 * Indicates that initialization of the Dunit has completed.
 *
 * Memory accesses are permitted and maintenance operation begins.
 * Until this bit is set to a 1, the memory controller will not accept
 * DRAM requests from the MEMORY_MANAGER or HTE.
 */
void set_ddr_init_complete(struct mrc_params *mrc_params)
{
	u32 dco;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~DCO_PMICTL;
	dco |= DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}

/*
 * This function restores the relevant timing data saved on a
 * previous boot.
 *
 * Reusing saved timings speeds up subsequent boots and is required
 * for Suspend To RAM capabilities.
 */
void restore_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;
	const struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
				set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
				set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
				set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, mt->vref[ch][bl]);
				}
			}
			set_wctl(ch, rk, mt->wctl[ch][rk]);
		}
		set_wcmd(ch, mt->wcmd[ch]);
	}
}

/*
 * Configure default settings normally set as part of read training
 *
 * Some defaults have to be set earlier as they may affect earlier
 * training steps.
 */
void default_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rdqs(ch, rk, bl, 24);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, 32);
				}
			}
		}
	}
}

/*
 * This function will perform our RCVEN Calibration Algorithm.
 * We will only use the 2xCLK domain timings to perform RCVEN Calibration.
 * All byte lanes will be calibrated "simultaneously" per channel per rank.
 */
void rcvn_cal(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_RCVN
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

#ifdef BACKUP_RCVN
#else
	uint32_t temp;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	u32 dtr1, dtr1_save;
#endif

	ENTERFN();

	/* rcvn_cal starts */
	mrc_post_code(0x05, 0x00);

#ifndef BACKUP_RCVN
	/* need separate burst to sample DQS preamble */
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr1_save = dtr1;
	dtr1 |= DTR1_TCCD_12CLK;
	msg_port_write(MEM_CTLR, DTR1, dtr1);
#endif

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform RCVEN Calibration on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * channel and rank being calibrated
					 */
					mrc_post_code(0x05, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_RCVN
					/* set hard-coded timing values */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++)
						set_rcvn(ch, rk, bl, ddr_rcvn[PLATFORM_ID]);
#else
					/* enable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							0, 1 << 8);
					}
					/* initialize the starting delay to 128 PI (cas +1 CLK) */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* 1x CLK domain timing is cas-4 */
						delay[bl] = (4 + 1) * FULL_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}
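
					/*
					 * Since the 1x domain above already
					 * covers cas-4 clocks, starting the
					 * 2x domain search at (4 + 1) clocks
					 * puts the sample point one clock
					 * past the CAS latency, inside the
					 * DQS preamble (assuming FULL_CLK is
					 * 128 PI steps per clock, per the
					 * comments in this function).
					 */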
  1328. /* now find the rising edge */
  1329. find_rising_edge(mrc_params, delay, ch, rk, true);
  1330. /* Now increase delay by 32 PI (1/4 CLK) to place in center of high pulse */
  1331. for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
  1332. delay[bl] += QRTR_CLK;
  1333. set_rcvn(ch, rk, bl, delay[bl]);
  1334. }
  1335. /* Now decrement delay by 128 PI (1 CLK) until we sample a "0" */
  1336. do {
  1337. temp = sample_dqs(mrc_params, ch, rk, true);
  1338. for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
  1339. if (temp & (1 << bl)) {
  1340. if (delay[bl] >= FULL_CLK) {
  1341. delay[bl] -= FULL_CLK;
  1342. set_rcvn(ch, rk, bl, delay[bl]);
  1343. } else {
  1344. /* not enough delay */
  1345. training_message(ch, rk, bl);
  1346. mrc_post_code(0xee, 0x50);
  1347. }
  1348. }
  1349. }
  1350. } while (temp & 0xff);
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						/* add "delay[]" values to "final_delay[][]" for rolling average */
						final_delay[ch][bl] += delay[bl];
						/* set timing based on rolling average values */
						set_rcvn(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
					}
#else
					/* Finally increment delay by 32 PI (1/4 CLK) to place in center of preamble */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}
#endif

					/* disable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							1 << 8, 1 << 8);
					}
#endif
				}
			}
		}
	}

#ifndef BACKUP_RCVN
	/* restore original */
	msg_port_write(MEM_CTLR, DTR1, dtr1_save);
#endif

	LEAVEFN();
}
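
/*
 * Illustrative sketch, not compiled: the per-lane RCVN placement above
 * reduces to "center in the high pulse, walk back whole clocks until
 * the preamble is found, then center in the preamble". Helper names
 * match the ones used in rcvn_cal(); the underflow guard is omitted
 * for brevity.
 */
#if 0
static void rcvn_place_one_lane(struct mrc_params *mrc_params,
				uint8_t ch, uint8_t rk, uint8_t bl,
				uint32_t *delay)
{
	*delay += QRTR_CLK;	/* center of DQS high pulse */
	set_rcvn(ch, rk, bl, *delay);

	/* walk back one full clock while DQS still samples "1" */
	while (sample_dqs(mrc_params, ch, rk, true) & (1 << bl)) {
		*delay -= FULL_CLK;
		set_rcvn(ch, rk, bl, *delay);
	}

	*delay += QRTR_CLK;	/* center of preamble */
	set_rcvn(ch, rk, bl, *delay);
}
#endif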
/*
 * This function will perform the Write Levelling algorithm
 * (align WCLK and WDQS).
 *
 * This algorithm will act on each rank in each channel separately.
 */
void wr_level(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_WDQS
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif
#ifdef BACKUP_WDQS
#else
	/* determines stop condition for CRS_WR_LVL */
	bool all_edges_found;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	/*
	 * static makes it so the data is loaded in the heap once by shadow(),
	 * where non-static copies the data onto the stack every time this
	 * function is called
	 */
	uint32_t address;	/* address to be checked during COARSE_WR_LVL */
	u32 dtr4, dtr4_save;
#endif

	ENTERFN();

	/* wr_level starts */
	mrc_post_code(0x06, 0x00);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform WRITE LEVELING algorithm on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * rank and channel being calibrated
					 */
					mrc_post_code(0x06, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_WDQS
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						set_wdqs(ch, rk, bl, ddr_wdqs[PLATFORM_ID]);
						set_wdq(ch, rk, bl, ddr_wdqs[PLATFORM_ID] - QRTR_CLK);
					}
#else
					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					/*
					 * enable Write Levelling Mode
					 * (EMRS1 w/ Write Levelling Mode Enable)
					 */
					dram_init_command(DCMD_MRS1(rk, 0x82));

					/*
					 * set ODT DRAM Full Time Termination
					 * disable in MCU
					 */
					dtr4 = msg_port_read(MEM_CTLR, DTR4);
					dtr4_save = dtr4;
					dtr4 |= DTR4_ODTDIS;
					msg_port_write(MEM_CTLR, DTR4, dtr4);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/*
						 * Enable Sandy Bridge Mode (WDQ Tri-State) &
						 * Ensure 5 WDQS pulses during Write Leveling
						 */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x10000154,
							0x100003fc);
					}

					/* Write Leveling Mode enabled in IO */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						1 << 16, 1 << 16);

					/* Initialize the starting delay to WCLK */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/*
						 * CLK0 --> RK0
						 * CLK1 --> RK1
						 */
						delay[bl] = get_wclk(ch, rk);
						set_wdqs(ch, rk, bl, delay[bl]);
					}

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, false);

					/* disable Write Levelling Mode */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						0, 1 << 16);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/* Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x00000154,
							0x100003fc);
					}

					/* restore original DTR4 */
					msg_port_write(MEM_CTLR, DTR4, dtr4_save);

					/*
					 * restore original value
					 * (Write Levelling Mode Disable)
					 */
					dram_init_command(DCMD_MRS1(rk, mrc_params->mrs1));

					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					mrc_post_code(0x06, 0x30 + ((ch << 4) | rk));

					/*
					 * COARSE WRITE LEVEL:
					 * check that we're on the correct clock edge
					 */

					/* hte reconfiguration request */
					mrc_params->hte_setup = 1;

					/* start CRS_WR_LVL with WDQS = WDQS + 128 PI */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] = get_wdqs(ch, rk, bl) + FULL_CLK;
						set_wdqs(ch, rk, bl, delay[bl]);
						/*
						 * program WDQ timings based on WDQS
						 * (WDQ = WDQS - 32 PI)
						 */
						set_wdq(ch, rk, bl, (delay[bl] - QRTR_CLK));
					}
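
					/*
					 * Note: WDQ deliberately shadows WDQS at
					 * WDQS - 32 PI (a quarter clock) throughout
					 * write training, so data launches with
					 * setup margin against the strobe; e.g.
					 * WDQS = 256 PI gives WDQ = 224 PI.
					 */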
					/* get an address in the targeted channel/rank */
					address = get_addr(ch, rk);
					do {
						uint32_t coarse_result = 0x00;
						uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
						/* assume pass */
						all_edges_found = true;

						mrc_params->hte_setup = 1;
						coarse_result = check_rw_coarse(mrc_params, address);

						/* check for failures and margin the byte lane back 128 PI (1 CLK) */
						for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
							if (coarse_result & (coarse_result_mask << bl)) {
								all_edges_found = false;
								delay[bl] -= FULL_CLK;
								set_wdqs(ch, rk, bl, delay[bl]);
								/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
								set_wdq(ch, rk, bl, delay[bl] - QRTR_CLK);
							}
						}
					} while (!all_edges_found);
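
					/*
					 * Note: check_rw_coarse() flags lanes that
					 * latched data on the wrong clock edge; each
					 * failing lane is stepped back one full clock
					 * (128 PI) and retested, so the loop exits
					 * only once every enabled lane writes on the
					 * intended edge.
					 */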
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* accumulate "final_delay[][]" values from "delay[]" values for rolling average */
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						final_delay[ch][bl] += delay[bl];
						set_wdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
						/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
						set_wdq(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled - QRTR_CLK);
					}
#endif
#endif
				}
			}
		}
	}

	LEAVEFN();
}
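
/*
 * Note: the rising-edge search above aligns WDQS to *a* WCLK edge, but
 * any edge satisfies the levelling feedback, so the result is only
 * defined modulo one clock. The COARSE_WR_LVL pass resolves that
 * ambiguity with an actual read/write test, stepping WDQS back in
 * whole clocks until data lands on the correct edge.
 */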
void prog_page_ctrl(struct mrc_params *mrc_params)
{
	u32 dpmc0;

	ENTERFN();

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= (4 << 16);
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	LEAVEFN();
}
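
/*
 * Note (assumption): judging by the DPMC0_PCLSTO_MASK clear followed by
 * the (4 << 16) merge, the page close timeout field starts at bit 16
 * and is programmed to encoding 4 here; DPMC0_PREAPWDEN presumably
 * enables precharge-all based power-down.
 */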
/*
 * This function will perform the READ TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the VREF and RDQS (and eventually RDQ) values
 * to achieve maximum READ margins. The algorithm will first determine the
 * X coordinate (RDQS setting). This is done by collapsing the VREF eye
 * until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
 * Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX,
 * then average those; this will be the final X coordinate. The algorithm
 * will then determine the Y coordinate (VREF setting). This is done by
 * collapsing the RDQS eye until we find a minimum required VREF eye for
 * RDQS_MIN and RDQS_MAX. Then we take the averages of the VREF eye at
 * RDQS_MIN and RDQS_MAX, then average those; this will be the final Y
 * coordinate.
 *
 * NOTE: this algorithm assumes the eye curves have a one-to-one relationship,
 * meaning for each X the curve has only one Y and vice versa.
 */
void rd_train(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef BACKUP_RDQS
#else
	uint8_t side_x;	/* tracks LEFT/RIGHT approach vectors */
	uint8_t side_y;	/* tracks BOTTOM/TOP approach vectors */
	/* X coordinate data (passing RDQS values) for approach vectors */
	uint8_t x_coordinate[2][2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* Y coordinate data (passing VREF values) for approach vectors */
	uint8_t y_coordinate[2][2][NUM_CHANNELS][NUM_BYTE_LANES];
	/* centered X (RDQS) */
	uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* centered Y (VREF) */
	uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

	/* rd_train starts */
	mrc_post_code(0x07, 0x00);

	ENTERFN();

#ifdef BACKUP_RDQS
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						set_rdqs(ch, rk, bl, ddr_rdqs[PLATFORM_ID]);
					}
				}
			}
		}
	}
#else
	/* initialize x/y_coordinate arrays */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						/* x_coordinate */
						x_coordinate[L][B][ch][rk][bl] = RDQS_MIN;
						x_coordinate[R][B][ch][rk][bl] = RDQS_MAX;
						x_coordinate[L][T][ch][rk][bl] = RDQS_MIN;
						x_coordinate[R][T][ch][rk][bl] = RDQS_MAX;
						/* y_coordinate */
						y_coordinate[L][B][ch][bl] = VREF_MIN;
						y_coordinate[R][B][ch][bl] = VREF_MIN;
						y_coordinate[L][T][ch][bl] = VREF_MAX;
						y_coordinate[R][T][ch][bl] = VREF_MAX;
					}
				}
			}
		}
	}

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* look for passing coordinates */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x10 + side_y * 2 + side_x);

			/* find passing values */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (0x1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables & (0x1 << rk)) {
							/* set x/y_coordinate search starting settings */
							for (bl = 0;
							     bl < NUM_BYTE_LANES / bl_divisor;
							     bl++) {
								set_rdqs(ch, rk, bl,
									 x_coordinate[side_x][side_y][ch][rk][bl]);
								set_vref(ch, bl,
									 y_coordinate[side_x][side_y][ch][bl]);
							}

							/* get an address in the target channel/rank */
							address = get_addr(ch, rk);

							/* request HTE reconfiguration */
							mrc_params->hte_setup = 1;

							/* test the settings */
							do {
								/* result[07:00] == failing byte lane (MAX 8) */
								result = check_bls_ex(mrc_params, address);

								/* check for failures */
								if (result & 0xff) {
									/* at least 1 byte lane failed */
									for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
										if (result & (bl_mask << bl)) {
											/* adjust the RDQS values accordingly */
											if (side_x == L)
												x_coordinate[L][side_y][ch][rk][bl] += RDQS_STEP;
											else
												x_coordinate[R][side_y][ch][rk][bl] -= RDQS_STEP;

											/* check that we haven't closed the RDQS_EYE too much */
											if ((x_coordinate[L][side_y][ch][rk][bl] > (RDQS_MAX - MIN_RDQS_EYE)) ||
											    (x_coordinate[R][side_y][ch][rk][bl] < (RDQS_MIN + MIN_RDQS_EYE)) ||
											    (x_coordinate[L][side_y][ch][rk][bl] ==
											     x_coordinate[R][side_y][ch][rk][bl])) {
												/*
												 * not enough RDQS margin available at this VREF
												 * update VREF values accordingly
												 */
												if (side_y == B)
													y_coordinate[side_x][B][ch][bl] += VREF_STEP;
												else
													y_coordinate[side_x][T][ch][bl] -= VREF_STEP;

												/* check that we haven't closed the VREF_EYE too much */
												if ((y_coordinate[side_x][B][ch][bl] > (VREF_MAX - MIN_VREF_EYE)) ||
												    (y_coordinate[side_x][T][ch][bl] < (VREF_MIN + MIN_VREF_EYE)) ||
												    (y_coordinate[side_x][B][ch][bl] == y_coordinate[side_x][T][ch][bl])) {
													/* VREF_EYE collapsed below MIN_VREF_EYE */
													training_message(ch, rk, bl);
													mrc_post_code(0xee, 0x70 + side_y * 2 + side_x);
												} else {
													/* update the VREF setting */
													set_vref(ch, bl, y_coordinate[side_x][side_y][ch][bl]);
													/* reset the X coordinate to begin the search at the new VREF */
													x_coordinate[side_x][side_y][ch][rk][bl] =
														(side_x == L) ? RDQS_MIN : RDQS_MAX;
												}
											}

											/* update the RDQS setting */
											set_rdqs(ch, rk, bl, x_coordinate[side_x][side_y][ch][rk][bl]);
										}
									}
								}
							} while (result & 0xff);
						}
					}
				}
			}
		}
	}

	mrc_post_code(0x07, 0x20);

	/* find final RDQS (X coordinate) & final VREF (Y coordinate) */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						uint32_t temp1;
						uint32_t temp2;

						/* x_coordinate */
						DPF(D_INFO,
						    "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n",
						    rk, bl,
						    x_coordinate[L][T][ch][rk][bl],
						    x_coordinate[R][T][ch][rk][bl],
						    x_coordinate[L][B][ch][rk][bl],
						    x_coordinate[R][B][ch][rk][bl]);

						/* average the TOP side LEFT & RIGHT values */
						temp1 = (x_coordinate[R][T][ch][rk][bl] + x_coordinate[L][T][ch][rk][bl]) / 2;
						/* average the BOTTOM side LEFT & RIGHT values */
						temp2 = (x_coordinate[R][B][ch][rk][bl] + x_coordinate[L][B][ch][rk][bl]) / 2;
						/* average the above averages */
						x_center[ch][rk][bl] = (uint8_t)((temp1 + temp2) / 2);

						/* y_coordinate */
						DPF(D_INFO,
						    "VREF R/L eye lane%d : %d-%d %d-%d\n",
						    bl,
						    y_coordinate[R][B][ch][bl],
						    y_coordinate[R][T][ch][bl],
						    y_coordinate[L][B][ch][bl],
						    y_coordinate[L][T][ch][bl]);

						/* average the RIGHT side TOP & BOTTOM values */
						temp1 = (y_coordinate[R][T][ch][bl] + y_coordinate[R][B][ch][bl]) / 2;
						/* average the LEFT side TOP & BOTTOM values */
						temp2 = (y_coordinate[L][T][ch][bl] + y_coordinate[L][B][ch][bl]) / 2;
						/* average the above averages */
						y_center[ch][bl] = (uint8_t)((temp1 + temp2) / 2);
					}
				}
			}
		}
	}

#ifdef RX_EYE_CHECK
	/* perform an eye check */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x30 + side_y * 2 + side_x);

			/* update the settings for the eye check */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables & (1 << rk)) {
							for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
								if (side_x == L)
									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] - (MIN_RDQS_EYE / 2));
								else
									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] + (MIN_RDQS_EYE / 2));

								if (side_y == B)
									set_vref(ch, bl, y_center[ch][bl] - (MIN_VREF_EYE / 2));
								else
									set_vref(ch, bl, y_center[ch][bl] + (MIN_VREF_EYE / 2));
							}
						}
					}
				}
			}

			/* request HTE reconfiguration */
			mrc_params->hte_setup = 1;

			/* check the eye */
			if (check_bls_ex(mrc_params, address) & 0xff) {
				/* one or more byte lanes failed */
				mrc_post_code(0xee, 0x74 + side_x * 2 + side_y);
			}
		}
	}
#endif

	mrc_post_code(0x07, 0x40);

	/* set final placements */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
#endif
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* x_coordinate */
#ifdef R2R_SHARING
						final_delay[ch][bl] += x_center[ch][rk][bl];
						set_rdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
#else
						set_rdqs(ch, rk, bl, x_center[ch][rk][bl]);
#endif
						/* y_coordinate */
						set_vref(ch, bl, y_center[ch][bl]);
					}
				}
			}
		}
	}
#endif

	LEAVEFN();
}
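
/*
 * Illustrative sketch, not compiled: the final RDQS/VREF placement
 * above is an average of averages over the four approach vectors,
 * e.g. for the X (RDQS) coordinate:
 */
#if 0
static uint8_t center_of_four(uint8_t lt, uint8_t rt, uint8_t lb, uint8_t rb)
{
	uint32_t top = (lt + rt) / 2;		/* TOP side midpoint */
	uint32_t bottom = (lb + rb) / 2;	/* BOTTOM side midpoint */

	return (uint8_t)((top + bottom) / 2);
}
#endif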
/*
 * This function will perform the WRITE TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the WDQ timings to achieve maximum WRITE margins.
 * The algorithm will start with WDQ at the current WDQ setting (tracks WDQS
 * in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data
 * patterns pass. This is because WDQS will be aligned to WCLK by the
 * Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window
 * of validity.
 */
void wr_train(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef BACKUP_WDQ
#else
	uint8_t side;	/* LEFT/RIGHT side indicator (0=L, 1=R) */
	uint32_t temp;	/* temporary DWORD */
	/* 2 arrays, for L & R side passing delays */
	uint32_t delay[2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

	/* wr_train starts */
	mrc_post_code(0x08, 0x00);

	ENTERFN();

#ifdef BACKUP_WDQ
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						set_wdq(ch, rk, bl, ddr_wdq[PLATFORM_ID]);
					}
				}
			}
		}
	}
#else
	/* initialize "delay" */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						/*
						 * want to start with
						 * WDQ = (WDQS - QRTR_CLK)
						 * +/- QRTR_CLK
						 */
						temp = get_wdqs(ch, rk, bl) - QRTR_CLK;
						delay[L][ch][rk][bl] = temp - QRTR_CLK;
						delay[R][ch][rk][bl] = temp + QRTR_CLK;
					}
				}
			}
		}
	}

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/*
	 * start algorithm on the LEFT side and train each channel/bl
	 * until no failures are observed, then repeat for the RIGHT side.
	 */
	for (side = L; side <= R; side++) {
		mrc_post_code(0x08, 0x10 + side);

		/* set starting values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables & (1 << rk)) {
						for (bl = 0;
						     bl < NUM_BYTE_LANES / bl_divisor;
						     bl++) {
							set_wdq(ch, rk, bl, delay[side][ch][rk][bl]);
						}
					}
				}
			}
		}

		/* find passing values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables & (1 << rk)) {
						/* get an address in the target channel/rank */
						address = get_addr(ch, rk);

						/* request HTE reconfiguration */
						mrc_params->hte_setup = 1;

						/* check the settings */
						do {
							/* result[07:00] == failing byte lane (MAX 8) */
							result = check_bls_ex(mrc_params, address);

							/* check for failures */
							if (result & 0xff) {
								/* at least 1 byte lane failed */
								for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
									if (result & (bl_mask << bl)) {
										if (side == L)
											delay[L][ch][rk][bl] += WDQ_STEP;
										else
											delay[R][ch][rk][bl] -= WDQ_STEP;

										/* check for algorithm failure */
										if (delay[L][ch][rk][bl] != delay[R][ch][rk][bl]) {
											/*
											 * margin available
											 * update delay setting
											 */
											set_wdq(ch, rk, bl,
												delay[side][ch][rk][bl]);
										} else {
											/*
											 * no margin available
											 * notify the user and halt
											 */
											training_message(ch, rk, bl);
											mrc_post_code(0xee, 0x80 + side);
										}
									}
								}
							}
						/* stop when all byte lanes pass */
						} while (result & 0xff);
					}
				}
			}
		}
	}

	/* program WDQ to the middle of passing window */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
#endif
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						DPF(D_INFO,
						    "WDQ eye rank%d lane%d : %d-%d\n",
						    rk, bl,
						    delay[L][ch][rk][bl],
						    delay[R][ch][rk][bl]);

						temp = (delay[R][ch][rk][bl] + delay[L][ch][rk][bl]) / 2;
#ifdef R2R_SHARING
						final_delay[ch][bl] += temp;
						set_wdq(ch, rk, bl,
							final_delay[ch][bl] / num_ranks_enabled);
#else
						set_wdq(ch, rk, bl, temp);
#endif
					}
				}
			}
		}
	}
#endif

	LEAVEFN();
}
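
/*
 * Worked example: a lane whose passing WDQ window is delay[L] = 300 PI
 * and delay[R] = 420 PI is programmed to the midpoint (300 + 420) / 2 =
 * 360 PI; with R2R_SHARING the midpoints are instead averaged across
 * all enabled ranks of the channel.
 */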
/*
 * This function will store relevant timing data
 *
 * This data will be used on subsequent boots to speed up boot times
 * and is required for Suspend To RAM capabilities.
 */
void store_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;
	struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				mt->rcvn[ch][rk][bl] = get_rcvn(ch, rk, bl);
				mt->rdqs[ch][rk][bl] = get_rdqs(ch, rk, bl);
				mt->wdqs[ch][rk][bl] = get_wdqs(ch, rk, bl);
				mt->wdq[ch][rk][bl] = get_wdq(ch, rk, bl);

				if (rk == 0)
					mt->vref[ch][bl] = get_vref(ch, bl);
			}

			mt->wctl[ch][rk] = get_wctl(ch, rk);
		}

		mt->wcmd[ch] = get_wcmd(ch);
	}

	/* need to save for a case of changing frequency after warm reset */
	mt->ddr_speed = mrc_params->ddr_speed;
}
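
/*
 * Note: VREF is a per-channel/byte-lane setting, so it is captured for
 * rank 0 only, while RCVN/RDQS/WDQS/WDQ are saved per rank; ddr_speed
 * is stored alongside so a frequency change across a warm reset can
 * invalidate the saved timings.
 */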
/*
 * The purpose of this function is to ensure the SEC comes out of reset
 * and that the IA side initiates the SEC's enabling of memory scrambling.
 */
void enable_scrambling(struct mrc_params *mrc_params)
{
	uint32_t lfsr = 0;
	uint8_t i;

	if (mrc_params->scrambling_enables == 0)
		return;

	ENTERFN();

	/* 32 bit seed is always stored in BIOS NVM */
	lfsr = mrc_params->timings.scrambler_seed;

	if (mrc_params->boot_mode == BM_COLD) {
		/*
		 * factory value is 0; on the first boot
		 * a clock-based seed is loaded.
		 */
		if (lfsr == 0) {
			/*
			 * get seed from system clock
			 * and make sure it is not all 1's
			 */
			lfsr = rdtsc() & 0x0fffffff;
		} else {
			/*
			 * Need to replace scrambler
			 *
			 * get next 32bit LFSR 16 times which is the last
			 * part of the previous scrambler vector
			 */
			for (i = 0; i < 16; i++)
				lfsr32(&lfsr);
		}

		/* save new seed */
		mrc_params->timings.scrambler_seed = lfsr;
	}
	/*
	 * In warm boot or S3 exit, we have the previous seed.
	 * In cold boot, we have the last 32bit LFSR which is the new seed.
	 */
	lfsr32(&lfsr);	/* shift to next value */
	msg_port_write(MEM_CTLR, SCRMSEED, (lfsr & 0x0003ffff));

	for (i = 0; i < 2; i++)
		msg_port_write(MEM_CTLR, SCRMLO + i, (lfsr & 0xaaaaaaaa));

	LEAVEFN();
}
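
/*
 * Illustrative sketch, not compiled: lfsr32() is assumed to step a
 * 32-bit linear feedback shift register once. The tap set below is
 * arbitrary, chosen only to show the shape of such a generator, and is
 * not the polynomial this file actually uses.
 */
#if 0
static void lfsr32_example(uint32_t *lfsr)
{
	/* feedback bit from an arbitrary, illustrative tap set */
	uint32_t bit = ((*lfsr >> 0) ^ (*lfsr >> 1) ^
			(*lfsr >> 21) ^ (*lfsr >> 31)) & 1;

	*lfsr = (*lfsr >> 1) | (bit << 31);
}
#endif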
/*
 * Configure MCU Power Management Control Register
 * and Scheduler Control Register
 */
void prog_ddr_control(struct mrc_params *mrc_params)
{
	u32 dsch;
	u32 dpmc0;

	ENTERFN();

	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch &= ~(DSCH_OOODIS | DSCH_OOOST3DIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_DISPWRDN;
	dpmc0 |= (mrc_params->power_down_disable << 25);
	dpmc0 &= ~DPMC0_CLKGTDIS;
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= (4 << 16);
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	/* CMDTRIST = 2h - CMD/ADDR are tristated when no valid command */
	mrc_write_mask(MEM_CTLR, DPMC1, 0x20, 0x30);

	LEAVEFN();
}
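
/*
 * Note (assumption): mrc_write_mask() is taken to follow the same
 * (unit, reg, value, mask) read-modify-write pattern as the
 * mrc_alt_write_mask() calls above, so 0x20 under mask 0x30 programs
 * DPMC1 bits [5:4] to 2h, matching the CMDTRIST comment.
 */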
/*
 * After training complete configure MCU Rank Population Register
 * specifying: ranks enabled, device width, density, address mode
 */
void prog_dra_drb(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 dco;
	u8 density = mrc_params->params.density;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	drp = 0;
	if (mrc_params->rank_enables & 1)
		drp |= DRP_RKEN0;
	if (mrc_params->rank_enables & 2)
		drp |= DRP_RKEN1;

	if (mrc_params->dram_width == X16) {
		drp |= (1 << 4);
		drp |= (1 << 9);
	}
	/*
	 * Density encoding in struct dram_params: 0=512Mb, 1=1Gb, 2=2Gb, 3=4Gb
	 * has to be mapped to the RANKDENSx encoding (0=1Gb)
	 */
	if (density == 0)
		density = 4;

	drp |= ((density - 1) << 6);
	drp |= ((density - 1) << 11);

	/* Address mode can be overwritten if ECC enabled */
	drp |= (mrc_params->address_mode << 14);

	msg_port_write(MEM_CTLR, DRP, drp);

	dco &= ~DCO_PMICTL;
	dco |= DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}
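
/*
 * Worked mapping for the density translation above: param 1 (1Gb) maps
 * to field value 0, param 2 (2Gb) to 1, param 3 (4Gb) to 2, and
 * param 0 (512Mb) is first remapped to 4, giving field value 3; the
 * same encoding is written for both ranks at bits 6 and 11.
 */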
/* Send DRAM wake command */
void perform_wake(struct mrc_params *mrc_params)
{
	ENTERFN();

	dram_wake_command();

	LEAVEFN();
}
/*
 * Configure refresh rate and short ZQ calibration interval
 * Activate dynamic self refresh
 */
void change_refresh_period(struct mrc_params *mrc_params)
{
	u32 drfc;
	u32 dcal;
	u32 dpmc0;

	ENTERFN();

	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	drfc |= (mrc_params->refresh_rate << 12);
	drfc |= DRFC_REFDBTCLR;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal |= (3 << 8);	/* 63ms */
	msg_port_write(MEM_CTLR, DCAL, dcal);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 |= (DPMC0_DYNSREN | DPMC0_ENPHYCLKGATE);
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	LEAVEFN();
}
/*
 * Configure DDRPHY for Auto-Refresh, Periodic Compensations,
 * Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
 */
void set_auto_refresh(struct mrc_params *mrc_params)
{
	uint32_t channel;
	uint32_t rank;
	uint32_t bl;
	uint32_t bl_divisor = 1;
	uint32_t temp;

	ENTERFN();

	/*
	 * Enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp,
	 * ZQSPERIOD, Auto-Precharge, CKE Power-Down
	 */
	for (channel = 0; channel < NUM_CHANNELS; channel++) {
		if (mrc_params->channel_enables & (1 << channel)) {
			/* Enable Periodic RCOMPS */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 2, 2);

			/* Enable Dynamic DiffAmp & Set Read ODT Value */
			switch (mrc_params->rd_odt_value) {
			case 0:
				temp = 0x3f;	/* OFF */
				break;
			default:
				temp = 0x00;	/* Auto */
				break;
			}

			for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,
					temp << 10,
					0x003ffc00);

				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,
					temp << 10,
					0x003ffc00);
			}

			/* Issue ZQCS command */
			for (rank = 0; rank < NUM_RANKS; rank++) {
				if (mrc_params->rank_enables & (1 << rank))
					dram_init_command(DCMD_ZQCS(rank));
			}
		}
	}

	clear_pointers();

	LEAVEFN();
}
/*
 * Depending on configuration enables ECC support
 *
 * Available memory size is decreased, and updated with 0s
 * in order to clear error status. Address mode 2 forced.
 */
void ecc_enable(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 dsch;
	u32 ecc_ctrl;

	if (mrc_params->ecc_enables == 0)
		return;

	ENTERFN();

	/* Configuration required in ECC mode */
	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= ~DRP_ADDRMAP_MASK;
	drp |= DRP_ADDRMAP_MAP1;
	drp |= DRP_PRI64BSPLITEN;
	msg_port_write(MEM_CTLR, DRP, drp);

	/* Disable new request bypass */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= DSCH_NEWBYPDIS;
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* Enable ECC */
	ecc_ctrl = (DECCCTRL_SBEEN | DECCCTRL_DBEEN | DECCCTRL_ENCBGEN);
	msg_port_write(MEM_CTLR, DECCCTRL, ecc_ctrl);

	/* Assume 8 bank memory, one bank is gone for ECC */
	mrc_params->mem_size -= mrc_params->mem_size / 8;

	/* For S3 resume memory content has to be preserved */
	if (mrc_params->boot_mode != BM_S3) {
		select_hte();
		hte_mem_init(mrc_params, MRC_MEM_INIT);
		select_mem_mgr();
	}

	LEAVEFN();
}
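
/*
 * Worked example: with one of eight banks given over to ECC,
 * mem_size -= mem_size / 8 leaves 7/8 of the array usable, e.g. a
 * 512 MiB configuration reports 448 MiB.
 */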
/*
 * Execute memory test.
 * If an error is detected, it is indicated in mrc_params->status.
 */
void memory_test(struct mrc_params *mrc_params)
{
	uint32_t result = 0;

	ENTERFN();

	select_hte();
	result = hte_mem_init(mrc_params, MRC_MEM_TEST);
	select_mem_mgr();

	DPF(D_INFO, "Memory test result %x\n", result);
	mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);
	LEAVEFN();
}
/* Lock MCU registers at the end of the initialization sequence */
void lock_registers(struct mrc_params *mrc_params)
{
	u32 dco;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~(DCO_PMICTL | DCO_PMIDIS);
	dco |= (DCO_DRPLOCK | DCO_CPGCLOCK);
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}