smc.c

/*
 * Copyright (C) 2013, Intel Corporation
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 *
 * Ported from Intel released Quark UEFI BIOS
 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
 *
 * SPDX-License-Identifier:	Intel
 */

#include <common.h>
#include <pci.h>
#include <asm/arch/device.h>
#include <asm/arch/mrc.h>
#include <asm/arch/msg_port.h>
#include "mrc_util.h"
#include "hte.h"
#include "smc.h"

/* t_rfc values (in picoseconds) per density */
static const uint32_t t_rfc[5] = {
	90000,	/* 512Mb */
	110000,	/* 1Gb */
	160000,	/* 2Gb */
	300000,	/* 4Gb */
	350000,	/* 8Gb */
};

/* t_ck clock period in picoseconds per speed index 800, 1066, 1333 */
static const uint32_t t_ck[3] = {
	2500,
	1875,
	1500
};
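
/*
 * Editorial note (not part of the original source): ddr_speed indexes
 * t_ck[] directly, so speed 0 -> 2500 ps (DDR3-800), 1 -> 1875 ps
 * (DDR3-1066) and 2 -> 1500 ps (DDR3-1333); likewise the DRAM density
 * selects the matching refresh entry, e.g. a 2Gb part uses
 * t_rfc[2] = 160000 ps.
 */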

/* Global variables */
static const uint16_t ddr_wclk[] = {193, 158};
static const uint16_t ddr_wctl[] = {1, 217};
static const uint16_t ddr_wcmd[] = {1, 220};

#ifdef BACKUP_RCVN
static const uint16_t ddr_rcvn[] = {129, 498};
#endif

#ifdef BACKUP_WDQS
static const uint16_t ddr_wdqs[] = {65, 289};
#endif

#ifdef BACKUP_RDQS
static const uint8_t ddr_rdqs[] = {32, 24};
#endif

#ifdef BACKUP_WDQ
static const uint16_t ddr_wdq[] = {32, 257};
#endif

/* Stop self refresh driven by MCU */
void clear_self_refresh(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* clear the PMSTS Channel Self Refresh bits */
	mrc_write_mask(MEM_CTLR, PMSTS, PMSTS_DISR, PMSTS_DISR);

	LEAVEFN();
}
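
/*
 * Editorial sketch (an assumption, not taken from this source): the
 * mrc_write_mask()/mrc_alt_write_mask() helpers used throughout appear
 * to be read-modify-write accessors over the message port, conceptually:
 *
 *	u32 v = msg_port_read(unit, reg);
 *	v = (v & ~mask) | (value & mask);
 *	msg_port_write(unit, reg, v);
 *
 * which is why calls of the form (..., bit, bit) set a bit while
 * (..., 0, bit) clears it.
 */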

/* Initialize the timing registers (DTR0..DTR4) in the MCU */
void prog_ddr_timing_control(struct mrc_params *mrc_params)
{
	uint8_t tcl, wl;
	uint8_t trp, trcd, tras, twr, twtr, trrd, trtp, tfaw;
	uint32_t tck;
	u32 dtr0, dtr1, dtr2, dtr3, dtr4;
	u32 tmp1, tmp2;

	ENTERFN();

	/* mcu_init starts */
	mrc_post_code(0x02, 0x00);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr2 = msg_port_read(MEM_CTLR, DTR2);
	dtr3 = msg_port_read(MEM_CTLR, DTR3);
	dtr4 = msg_port_read(MEM_CTLR, DTR4);

	tck = t_ck[mrc_params->ddr_speed];	/* Clock in picoseconds */
	tcl = mrc_params->params.cl;		/* CAS latency in clocks */
	trp = tcl;				/* Per CAT MRC */
	trcd = tcl;				/* Per CAT MRC */
	tras = MCEIL(mrc_params->params.ras, tck);

	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);

	twtr = MCEIL(mrc_params->params.wtr, tck);
	trrd = MCEIL(mrc_params->params.rrd, tck);
	trtp = 4;	/* Valid for 800 and 1066, use 5 for 1333 */
	tfaw = MCEIL(mrc_params->params.faw, tck);

	wl = 5 + mrc_params->ddr_speed;

	dtr0 &= ~DTR0_DFREQ_MASK;
	dtr0 |= mrc_params->ddr_speed;
	dtr0 &= ~DTR0_TCL_MASK;
	tmp1 = tcl - 5;
	dtr0 |= ((tcl - 5) << 12);
	dtr0 &= ~DTR0_TRP_MASK;
	dtr0 |= ((trp - 5) << 4);	/* 5 bit DRAM Clock */
	dtr0 &= ~DTR0_TRCD_MASK;
	dtr0 |= ((trcd - 5) << 8);	/* 5 bit DRAM Clock */

	dtr1 &= ~DTR1_TWCL_MASK;
	tmp2 = wl - 3;
	dtr1 |= (wl - 3);
	dtr1 &= ~DTR1_TWTP_MASK;
	dtr1 |= ((wl + 4 + twr - 14) << 8);	/* Change to tWTP */
	dtr1 &= ~DTR1_TRTP_MASK;
	dtr1 |= ((MMAX(trtp, 4) - 3) << 28);	/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TRRD_MASK;
	dtr1 |= ((trrd - 4) << 24);	/* 4 bit DRAM Clock */
	dtr1 &= ~DTR1_TCMD_MASK;
	dtr1 |= (1 << 4);
	dtr1 &= ~DTR1_TRAS_MASK;
	dtr1 |= ((tras - 14) << 20);	/* 6 bit DRAM Clock */
	dtr1 &= ~DTR1_TFAW_MASK;
	dtr1 |= ((((tfaw + 1) >> 1) - 5) << 16);	/* 4 bit DRAM Clock */
	/* Set 4 Clock CAS to CAS delay (multi-burst) */
	dtr1 &= ~DTR1_TCCD_MASK;

	dtr2 &= ~DTR2_TRRDR_MASK;
	dtr2 |= 1;
	dtr2 &= ~DTR2_TWWDR_MASK;
	dtr2 |= (2 << 8);
	dtr2 &= ~DTR2_TRWDR_MASK;
	dtr2 |= (2 << 16);

	dtr3 &= ~DTR3_TWRDR_MASK;
	dtr3 |= 2;
	dtr3 &= ~DTR3_TXXXX_MASK;
	dtr3 |= (2 << 4);

	dtr3 &= ~DTR3_TRWSR_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	} else if (mrc_params->ddr_speed == DDRFREQ_1066) {
		/* Extended RW delay (+1) */
		dtr3 |= ((tcl - 5 + 1) << 8);
	}

	dtr3 &= ~DTR3_TWRSR_MASK;
	dtr3 |= ((4 + wl + twtr - 11) << 13);

	dtr3 &= ~DTR3_TXP_MASK;
	if (mrc_params->ddr_speed == DDRFREQ_800)
		dtr3 |= ((MMAX(0, 1 - 1)) << 22);
	else
		dtr3 |= ((MMAX(0, 2 - 1)) << 22);

	dtr4 &= ~DTR4_WRODTSTRT_MASK;
	dtr4 |= 1;
	dtr4 &= ~DTR4_WRODTSTOP_MASK;
	dtr4 |= (1 << 4);
	dtr4 &= ~DTR4_XXXX1_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 8);
	dtr4 &= ~DTR4_XXXX2_MASK;
	dtr4 |= ((1 + tmp1 - tmp2 + 2) << 12);
	dtr4 &= ~(DTR4_ODTDIS | DTR4_TRGSTRDIS);

	msg_port_write(MEM_CTLR, DTR0, dtr0);
	msg_port_write(MEM_CTLR, DTR1, dtr1);
	msg_port_write(MEM_CTLR, DTR2, dtr2);
	msg_port_write(MEM_CTLR, DTR3, dtr3);
	msg_port_write(MEM_CTLR, DTR4, dtr4);

	LEAVEFN();
}
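
/*
 * Editorial sketch (an assumption, not taken from this source): the
 * picosecond-to-clock conversions above only work if MCEIL() is a
 * round-up integer division, i.e. something along the lines of
 *
 *	#define MCEIL(num, den)	(((num) + (den) - 1) / (den))
 *
 * so that the 15000 ps JEDEC tWR budget becomes MCEIL(15000, 2500) = 6
 * clocks at DDR3-800 and MCEIL(15000, 1875) = 8 clocks at DDR3-1066.
 */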

/* Configure MCU before jedec init sequence */
void prog_decode_before_jedec(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 drfc;
	u32 dcal;
	u32 dsch;
	u32 dpmc0;

	ENTERFN();

	/* Disable power saving features */
	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 |= (DPMC0_CLKGTDIS | DPMC0_DISPWRDN);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 &= ~DPMC0_DYNSREN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	/* Disable out of order transactions */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= (DSCH_OOODIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* Disable issuing the REF command */
	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	/* Disable ZQ calibration short */
	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal &= ~DCAL_SRXZQCL_MASK;
	msg_port_write(MEM_CTLR, DCAL, dcal);

	/*
	 * Training is performed in address mode 0, so rank population has
	 * limited impact; however, the simulator complains if a
	 * non-existent rank is enabled.
	 */
	drp = 0;
	if (mrc_params->rank_enables & 1)
		drp |= DRP_RKEN0;
	if (mrc_params->rank_enables & 2)
		drp |= DRP_RKEN1;
	msg_port_write(MEM_CTLR, DRP, drp);

	LEAVEFN();
}
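
/*
 * Editorial note (not part of the original source): rank_enables is a
 * plain bitmask, so a board with both ranks populated passes 0x3 and
 * ends up with DRP = DRP_RKEN0 | DRP_RKEN1 here.
 */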

/*
 * After Cold Reset, BIOS should set COLDWAKE bit to 1 before
 * sending the WAKE message to the Dunit.
 *
 * For Standby Exit, or any other mode in which the DRAM is in
 * SR, this bit must be set to 0.
 */
void perform_ddr_reset(struct mrc_params *mrc_params)
{
	ENTERFN();

	/* Set COLDWAKE bit before sending the WAKE message */
	mrc_write_mask(MEM_CTLR, DRMC, DRMC_COLDWAKE, DRMC_COLDWAKE);

	/* Send wake command to DUNIT (MUST be done before JEDEC) */
	dram_wake_command();

	/* Set default value */
	msg_port_write(MEM_CTLR, DRMC,
		       mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0);

	LEAVEFN();
}

/*
 * This function performs some initialization on the DDRIO unit.
 * This function is dependent on BOARD_ID, DDR_SPEED, and CHANNEL_ENABLES.
 */
void ddrphy_init(struct mrc_params *mrc_params)
{
	uint32_t temp;
	uint8_t ch;		/* channel counter */
	uint8_t rk;		/* rank counter */
	uint8_t bl_grp;		/* byte lane group counter (2 BLs per module) */
	uint8_t bl_divisor = 1;	/* byte lane divisor */
	/* For DDR3 --> 0 == 800, 1 == 1066, 2 == 1333 */
	uint8_t speed = mrc_params->ddr_speed & 3;
	uint8_t cas;
	uint8_t cwl;

	ENTERFN();

	cas = mrc_params->params.cl;
	cwl = 5 + mrc_params->ddr_speed;

	/* ddrphy_init starts */
	mrc_post_code(0x03, 0x00);

	/*
	 * HSD#231531
	 * Make sure IOBUFACT is deasserted before initializing the DDR PHY
	 *
	 * HSD#234845
	 * Make sure WRPTRENABLE is deasserted before initializing the DDR PHY
	 */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Deassert DDRPHY Initialization Complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 20), 1 << 20);	/* SPID_INIT_COMPLETE=0 */
			/* Deassert IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 2), 1 << 2);	/* IOBUFACTRST_N=0 */
			/* Disable WRPTR */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				~(1 << 0), 1 << 0);	/* WRPTRENABLE=0 */
		}
	}

	/* Put PHY in reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 0, 1);

	/* Initialize DQ01, DQ23, CMD, CLK-CTL, COMP modules */

	/* STEP0 */
	mrc_post_code(0x03, 0x10);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* Analog MUX select - IO2xCLKSEL */
				mrc_alt_write_mask(DDRPHY,
					DQOBSCKEBBCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 0 : (1 << 22), 1 << 22);

				/* ODT Strength */
				switch (mrc_params->rd_odt_value) {
				case 1:
					temp = 0x3;
					break;	/* 60 ohm */
				case 2:
					temp = 0x3;
					break;	/* 120 ohm */
				case 3:
					temp = 0x3;
					break;	/* 180 ohm */
				default:
					temp = 0x3;
					break;	/* 120 ohm */
				}

				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);
				/* ODT strength */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp << 5, 0x60);

				/* Dynamic ODT/DIFFAMP */
				temp = (cas << 24) | (cas << 16) |
					(cas << 8) | (cas << 0);
				switch (speed) {
				case 0:
					temp -= 0x01010101;
					break;	/* 800 */
				case 1:
					temp -= 0x02020202;
					break;	/* 1066 */
				case 2:
					temp -= 0x03030303;
					break;	/* 1333 */
				case 3:
					temp -= 0x04040404;
					break;	/* 1600 */
				}
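
				/*
				 * Editorial example (not part of the
				 * original source): with cas = 6 the
				 * replicated value starts as 0x06060606,
				 * so at DDR3-1066 (speed == 1) each of the
				 * four launch-time fields below is
				 * programmed to 6 - 2 = 4.
				 */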
				/* Launch Time: ODT, DIFFAMP, ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B01LATCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x1f1f1f1f);
				switch (speed) {
				/* HSD#234715 */
				case 0:
					temp = (0x06 << 16) | (0x07 << 8);
					break;	/* 800 */
				case 1:
					temp = (0x07 << 16) | (0x08 << 8);
					break;	/* 1066 */
				case 2:
					temp = (0x09 << 16) | (0x0a << 8);
					break;	/* 1333 */
				case 3:
					temp = (0x0a << 16) | (0x0b << 8);
					break;	/* 1600 */
				}

				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B0ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);
				/* On Duration: ODT, DIFFAMP */
				mrc_alt_write_mask(DDRPHY,
					B1ONDURCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003f3f00);

				switch (mrc_params->rd_odt_value) {
				case 0:
					/* override DIFFAMP=on, ODT=off */
					temp = (0x3f << 16) | (0x3f << 10);
					break;
				default:
					/* override DIFFAMP=on, ODT=on */
					temp = (0x3f << 16) | (0x2a << 10);
					break;
				}

				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);
				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0x003ffc00);

				/* DLL Setup */

				/* 1xCLK Domain Timings: tEDP,RCVEN,WDQS (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);
				mrc_alt_write_mask(DDRPHY,
					B1LATCTL0 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					((cas + 7) << 16) | ((cas - 4) << 8) |
					((cwl - 2) << 0), 0x003f1f1f);

				/* RCVEN Bypass (PO) */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x81);

				/* TX */
				mrc_alt_write_mask(DDRPHY,
					DQCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 16, 1 << 16);
				mrc_alt_write_mask(DDRPHY,
					B01PTRCTL1 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 8, 1 << 8);

				/* RX (PO) */
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B0VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Internal Vref Code, Enable#, Ext_or_Int (1=Ext) */
				mrc_alt_write_mask(DDRPHY,
					B1VREFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					(0x03 << 2) | (0x0 << 1) | (0x0 << 0),
					0xff);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B0RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
				/* Per-Bit De-Skew Enable */
				mrc_alt_write_mask(DDRPHY,
					B1RXIOBUFCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0, 0x10);
			}

			/* CLKEBB */
			mrc_alt_write_mask(DDRPHY,
				CMDOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 23);

			/* Enable tristate control of cmd/address bus */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0, 0x03);

			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CMDRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 5) | (0x03 << 0), 0x3ff);

			/* CMDPM* registers must be programmed in this order */

			/* Turn On Delays: SFR (regulator), MPLL */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG4 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/*
			 * Delays: ASSERT_IOBUFACT_to_ALLON0_for_PM_MSG_3,
			 * VREG (MDLL) Turn On, ALLON0_to_DEASSERT_IOBUFACT
			 * for_PM_MSG_gt0, MDLL Turn On
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG3 + ch * DDRIOCCC_CH_OFFSET,
				0xfffff616, 0xffffffff);
			/* MPLL Divider Reset Delays */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG2 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn Off Delays: VREG, Staggered MDLL, MDLL, PI */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG1 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Turn On Delays: MPLL, Staggered MDLL, PI, IOBUFACT */
			mrc_alt_write_mask(DDRPHY,
				CMDPMDLYREG0 + ch * DDRIOCCC_CH_OFFSET,
				0xffffffff, 0xffffffff);
			/* Allow PUnit signals */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				(0x6 << 8) | (0x1 << 6) | (0x4 << 0),
				0xffe00f4f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCOBSCKEBBCTL + ch * DDRIOCCC_CH_OFFSET,
				0, 1 << 24);	/* CLKEBB */
			/* Buffer Enable: CS,CKE,ODT,CLK */
			mrc_alt_write_mask(DDRPHY,
				CCCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x1f, 0x000ffff1);
			/* ODT RCOMP */
			mrc_alt_write_mask(DDRPHY,
				CCRCOMPODT + ch * DDRIOCCC_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0), 0x00001f1f);
			/* DLL_VREG Bias Trim, VREF Tuning for DLL_VREG */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				(0x3 << 4) | (0x7 << 0), 0x7f);

			/*
			 * COMP (RON channel specific)
			 * - DQ/DQS/DM RON: 32 Ohm
			 * - CTRL/CMD RON: 27 Ohm
			 * - CLK RON: 26 Ohm
			 */
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CMDVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0c << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0f << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x08 << 24) | (0x03 << 16), 0x3f3f0000);
			/* RCOMP Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CTLVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0c << 24) | (0x03 << 16), 0x3f3f0000);

			/* DQS Swapped Input Enable */
			mrc_alt_write_mask(DDRPHY,
				COMPEN1CH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 19) | (1 << 17), 0xc00ac000);

			/* ODT VREF = 1.5 x 274/(360+274) = 0.65V (code of ~50) */
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				DQSVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x32 << 8) | (0x03 << 0), 0x00003f3f);
			/* ODT Vref PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CLKVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x0e << 8) | (0x05 << 0), 0x00003f3f);

			/*
			 * Slew rate settings are frequency specific,
			 * numbers below are for 800MHz (speed == 0)
			 * - DQ/DQS/DM/CLK SR: 4V/ns,
			 * - CTRL/CMD SR: 1.5V/ns
			 */
			temp = (0x0e << 16) | (0x0e << 12) | (0x08 << 8) |
				(0x0b << 4) | (0x0b << 0);
			/* DCOMP Delay Select: CTL,CMD,CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				DLYSELCH0 + ch * DDRCOMP_CH_OFFSET,
				temp, 0x000fffff);
			/* TCO Vref CLK,DQS,DQ */
			mrc_alt_write_mask(DDRPHY,
				TCOVREFCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x05 << 16) | (0x05 << 8) | (0x05 << 0),
				0x003f3f3f);
			/* ODTCOMP CMD/CTL PU/PD */
			mrc_alt_write_mask(DDRPHY,
				CCBUFODTCH0 + ch * DDRCOMP_CH_OFFSET,
				(0x03 << 8) | (0x03 << 0),
				0x00001f1f);
			/* COMP */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				0, 0xc0000100);

#ifdef BACKUP_COMPS
			/* DQ COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* DQS COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x10 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CLK COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0c << 16),
				0x801f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x07 << 16),
				0x801f0000);
			/* ODTCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* ODTCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKODTPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0b << 16),
				0x801f0000);
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 31, 1 << 31);

			/* CMD COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CMDDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);

			/* CTL COMP Overrides */
			/* RCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* RCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDRVPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0d << 16),
				0x803f0000);
			/* DCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
			/* DCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CTLDLYPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x0a << 16),
				0x801f0000);
#else
			/* DQ TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* DQS TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				DQSTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);

			/* CLK TCOCOMP Overrides */
			/* TCOCOMP PU */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPUCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
			/* TCOCOMP PD */
			mrc_alt_write_mask(DDRPHY,
				CLKTCOPDCTLCH0 + ch * DDRCOMP_CH_OFFSET,
				(1 << 31) | (0x1f << 16),
				0x801f0000);
#endif

			/* program STATIC delays */
#ifdef BACKUP_WCMD
			set_wcmd(ch, ddr_wcmd[PLATFORM_ID]);
#else
			set_wcmd(ch, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif

			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					set_wclk(ch, rk, ddr_wclk[PLATFORM_ID]);
#ifdef BACKUP_WCTL
					set_wctl(ch, rk, ddr_wctl[PLATFORM_ID]);
#else
					set_wctl(ch, rk, ddr_wclk[PLATFORM_ID] + HALF_CLK);
#endif
				}
			}
		}
	}

	/* COMP (non channel specific) */
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADRVPDCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPUCTL, 1 << 30, 1 << 30);
	/* RCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADRVPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANAODTPDCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPUCTL, 1 << 30, 1 << 30);
	/* ODT: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANAODTPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CMDANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANADLYPDCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPUCTL, 1 << 30, 1 << 30);
	/* DCOMP: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CTLANADLYPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, CLKANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PU Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPUCTL, 1 << 30, 1 << 30);
	/* TCO: Dither PD Enable */
	mrc_alt_write_mask(DDRPHY, DQSANATCOPDCTL, 1 << 30, 1 << 30);
	/* TCOCOMP: Pulse Count */
	mrc_alt_write_mask(DDRPHY, TCOCNTCTRL, 1, 3);
	/* ODT: CMD/CTL PD/PU */
	mrc_alt_write_mask(DDRPHY, CHNLBUFSTATIC,
		(0x03 << 24) | (0x03 << 16), 0x1f1f0000);
	/* Set 1us counter */
	mrc_alt_write_mask(DDRPHY, MSCNTR, 0x64, 0xff);
	mrc_alt_write_mask(DDRPHY, LATCH1CTL, 0x1 << 28, 0x70000000);

	/* Release PHY from reset */
	mrc_alt_write_mask(DDRPHY, MASTERRSTN, 1, 1);

	/* STEP1 */
	mrc_post_code(0x03, 0x11);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 13,
					1 << 13);	/* Enable VREG */
				delay_n(3);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 13, 1 << 13);	/* Enable VREG */
			delay_n(3);
		}
	}

	/* STEP2 */
	mrc_post_code(0x03, 0x12);
	delay_n(200);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				mrc_alt_write_mask(DDRPHY,
					DQMDLLCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					1 << 17,
					1 << 17);	/* Enable MCDLL */
				delay_n(50);
			}

			/* ECC */
			mrc_alt_write_mask(DDRPHY, ECCMDLLCTL,
				1 << 17, 1 << 17);	/* Enable MCDLL */
			delay_n(50);
			/* CMD */
			mrc_alt_write_mask(DDRPHY,
				CMDMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);
			/* CLK-CTL */
			mrc_alt_write_mask(DDRPHY,
				CCMDLLCTL + ch * DDRIOCCC_CH_OFFSET,
				1 << 18, 1 << 18);	/* Enable MCDLL */
			delay_n(50);
		}
	}

	/* STEP3 */
	mrc_post_code(0x03, 0x13);
	delay_n(100);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* DQ01-DQ23 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
#ifdef FORCE_16BIT_DDRIO
				temp = (bl_grp &&
					(mrc_params->channel_width == X16)) ?
					0x11ff : 0xffff;
#else
				temp = 0xffff;
#endif
				/* Enable TXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLTXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					temp, 0xffff);
				delay_n(3);
				/* Enable RXDLL */
				mrc_alt_write_mask(DDRPHY,
					DQDLLRXCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
				delay_n(3);
				/* Enable RXDLL Overrides BL0 */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					0xf, 0xf);
			}

			/* ECC */
			temp = 0xffff;
			mrc_alt_write_mask(DDRPHY, ECCDLLTXCTL,
				temp, 0xffff);
			delay_n(3);

			/* CMD (PO) */
			mrc_alt_write_mask(DDRPHY,
				CMDDLLTXCTL + ch * DDRIOCCC_CH_OFFSET,
				temp, 0xffff);
			delay_n(3);
		}
	}

	/* STEP4 */
	mrc_post_code(0x03, 0x14);

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* Host To Memory Clock Alignment (HMC) for 800/1066 */
			for (bl_grp = 0;
			     bl_grp < (NUM_BYTE_LANES / bl_divisor) / 2;
			     bl_grp++) {
				/* CLK_ALIGN_MOD_ID */
				mrc_alt_write_mask(DDRPHY,
					DQCLKALIGNREG2 +
					bl_grp * DDRIODQ_BL_OFFSET +
					ch * DDRIODQ_CH_OFFSET,
					bl_grp ? 3 : 1,
					0xf);
			}

			mrc_alt_write_mask(DDRPHY,
				ECCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x0, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CCCLKALIGNREG2 + ch * DDRIODQ_CH_OFFSET,
				0x2, 0xf);
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				0x20, 0x30);
			/*
			 * NUM_SAMPLES, MAX_SAMPLES,
			 * MACRO_PI_STEP, MICRO_PI_STEP
			 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG1 + ch * DDRIOCCC_CH_OFFSET,
				(0x18 << 16) | (0x10 << 8) |
				(0x8 << 2) | (0x1 << 0),
				0x007f7fff);
			/* TOTAL_NUM_MODULES, FIRST_U_PARTITION */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG2 + ch * DDRIOCCC_CH_OFFSET,
				(0x10 << 16) | (0x4 << 8) | (0x2 << 4),
				0x001f0ff0);
#ifdef HMC_TEST
			/* START_CLK_ALIGN=1 */
			mrc_alt_write_mask(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 24, 1 << 24);
			while (msg_port_alt_read(DDRPHY,
				CMDCLKALIGNREG0 + ch * DDRIOCCC_CH_OFFSET) &
				(1 << 24))
				;	/* wait for START_CLK_ALIGN=0 */
#endif

			/* Set RD/WR Pointer Separation & COUNTEN & FIFOPTREN */
			mrc_alt_write_mask(DDRPHY,
				CMDPTRREG + ch * DDRIOCCC_CH_OFFSET,
				1, 1);	/* WRPTRENABLE=1 */

			/* COMP initial */

			/* enable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				1 << 5, 1 << 5);
			/* Initial COMP Enable */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 1, 1);
			/* wait for Initial COMP Enable = 0 */
			while (msg_port_alt_read(DDRPHY, CMPCTRL) & 1)
				;
			/* disable bypass for CLK buffer (PO) */
			mrc_alt_write_mask(DDRPHY,
				COMPEN0CH0 + ch * DDRCOMP_CH_OFFSET,
				~(1 << 5), 1 << 5);

			/* IOBUFACT */

			/* STEP4a */
			mrc_alt_write_mask(DDRPHY,
				CMDCFGREG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 2, 1 << 2);	/* IOBUFACTRST_N=1 */

			/* DDRPHY initialization complete */
			mrc_alt_write_mask(DDRPHY,
				CMDPMCONFIG0 + ch * DDRIOCCC_CH_OFFSET,
				1 << 20, 1 << 20);	/* SPID_INIT_COMPLETE=1 */
		}
	}

	LEAVEFN();
}
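
/*
 * Editorial note (not part of the original source): the bring-up above
 * is deliberately staged - STEP0 programs the modules with the PHY held
 * in reset, STEP1 enables the VREGs, STEP2 the master DLLs, STEP3 the
 * per-lane TX/RX DLLs, and STEP4 runs clock alignment and compensation
 * before IOBUFACT and SPID_INIT_COMPLETE are finally asserted.
 */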

/* This function performs JEDEC initialization on all enabled channels */
void perform_jedec_init(struct mrc_params *mrc_params)
{
	uint8_t twr, wl, rank;
	uint32_t tck;
	u32 dtr0;
	u32 drp;
	u32 drmc;
	u32 mrs0_cmd = 0;
	u32 emrs1_cmd = 0;
	u32 emrs2_cmd = 0;
	u32 emrs3_cmd = 0;

	ENTERFN();

	/* jedec_init starts */
	mrc_post_code(0x04, 0x00);

	/* DDR3_RESET_SET=0, DDR3_RESET_RESET=1 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 2, 0x102);

	/* Assert RESET# for 200us */
	delay_u(200);

	/* DDR3_RESET_SET=1, DDR3_RESET_RESET=0 */
	mrc_alt_write_mask(DDRPHY, CCDDR3RESETCTL, 0x100, 0x102);

	dtr0 = msg_port_read(MEM_CTLR, DTR0);

	/*
	 * Set CKEVAL for populated ranks
	 * then send NOP to each rank (#4550197)
	 */
	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= 0x3;

	drmc = msg_port_read(MEM_CTLR, DRMC);
	drmc &= 0xfffffffc;
	drmc |= (DRMC_CKEMODE | drp);

	msg_port_write(MEM_CTLR, DRMC, drmc);

	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		dram_init_command(DCMD_NOP(rank));
	}

	msg_port_write(MEM_CTLR, DRMC,
		(mrc_params->rd_odt_value == 0 ? DRMC_ODTMODE : 0));

	/*
	 * setup for emrs 2
	 * BIT[15:11] --> Always "0"
	 * BIT[10:09] --> Rtt_WR: want "Dynamic ODT Off" (0)
	 * BIT[08]    --> Always "0"
	 * BIT[07]    --> SRT: use sr_temp_range
	 * BIT[06]    --> ASR: want "Manual SR Reference" (0)
	 * BIT[05:03] --> CWL: use oem_tCWL
	 * BIT[02:00] --> PASR: want "Full Array" (0)
	 */
	emrs2_cmd |= (2 << 3);
	wl = 5 + mrc_params->ddr_speed;
	emrs2_cmd |= ((wl - 5) << 9);
	emrs2_cmd |= (mrc_params->sr_temp_range << 13);

	/*
	 * setup for emrs 3
	 * BIT[15:03] --> Always "0"
	 * BIT[02]    --> MPR: want "Normal Operation" (0)
	 * BIT[01:00] --> MPR_Loc: want "Predefined Pattern" (0)
	 */
	emrs3_cmd |= (3 << 3);

	/*
	 * setup for emrs 1
	 * BIT[15:13]    --> Always "0"
	 * BIT[12:12]    --> Qoff: want "Output Buffer Enabled" (0)
	 * BIT[11:11]    --> TDQS: want "Disabled" (0)
	 * BIT[10:10]    --> Always "0"
	 * BIT[09,06,02] --> Rtt_nom: use rtt_nom_value
	 * BIT[08]       --> Always "0"
	 * BIT[07]       --> WR_LVL: want "Disabled" (0)
	 * BIT[05,01]    --> DIC: use ron_value
	 * BIT[04:03]    --> AL: additive latency want "0" (0)
	 * BIT[00]       --> DLL: want "Enable" (0)
	 *
	 * (BIT5|BIT1) set Ron value
	 * 00 --> RZQ/6 (40ohm)
	 * 01 --> RZQ/7 (34ohm)
	 * 1* --> RESERVED
	 *
	 * (BIT9|BIT6|BIT2) set Rtt_nom value
	 * 000 --> Disabled
	 * 001 --> RZQ/4 ( 60ohm)
	 * 010 --> RZQ/2 (120ohm)
	 * 011 --> RZQ/6 ( 40ohm)
	 * 1** --> RESERVED
	 */
	emrs1_cmd |= (1 << 3);
	emrs1_cmd &= ~(1 << 6);

	if (mrc_params->ron_value == 0)
		emrs1_cmd |= (1 << 7);
	else
		emrs1_cmd &= ~(1 << 7);

	if (mrc_params->rtt_nom_value == 0)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_40 << 6);
	else if (mrc_params->rtt_nom_value == 1)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_60 << 6);
	else if (mrc_params->rtt_nom_value == 2)
		emrs1_cmd |= (DDR3_EMRS1_RTTNOM_120 << 6);

	/* save MRS1 value (excluding control fields) */
	mrc_params->mrs1 = emrs1_cmd >> 6;

	/*
	 * setup for mrs 0
	 * BIT[15:13]    --> Always "0"
	 * BIT[12]       --> PPD: for Quark (1)
	 * BIT[11:09]    --> WR: use oem_tWR
	 * BIT[08]       --> DLL: want "Reset" (1, self clearing)
	 * BIT[07]       --> MODE: want "Normal" (0)
	 * BIT[06:04,02] --> CL: use oem_tCAS
	 * BIT[03]       --> RD_BURST_TYPE: want "Interleave" (1)
	 * BIT[01:00]    --> BL: want "8 Fixed" (0)
	 * WR:
	 *	0 --> 16
	 *	1 --> 5
	 *	2 --> 6
	 *	3 --> 7
	 *	4 --> 8
	 *	5 --> 10
	 *	6 --> 12
	 *	7 --> 14
	 * CL:
	 *	BIT[02:02] "0" if oem_tCAS <= 11 (1866?)
	 *	BIT[06:04] use oem_tCAS-4
	 */
	mrs0_cmd |= (1 << 14);
	mrs0_cmd |= (1 << 18);
	mrs0_cmd |= ((((dtr0 >> 12) & 7) + 1) << 10);
	tck = t_ck[mrc_params->ddr_speed];
	/* Per JEDEC: tWR=15000ps DDR2/3 from 800-1600 */
	twr = MCEIL(15000, tck);
	mrs0_cmd |= ((twr - 4) << 15);

	for (rank = 0; rank < NUM_RANKS; rank++) {
		/* Skip to next populated rank */
		if ((mrc_params->rank_enables & (1 << rank)) == 0)
			continue;

		emrs2_cmd |= (rank << 22);
		dram_init_command(emrs2_cmd);

		emrs3_cmd |= (rank << 22);
		dram_init_command(emrs3_cmd);

		emrs1_cmd |= (rank << 22);
		dram_init_command(emrs1_cmd);

		mrs0_cmd |= (rank << 22);
		dram_init_command(mrs0_cmd);

		dram_init_command(DCMD_ZQCL(rank));
	}

	LEAVEFN();
}
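
/*
 * Editorial example (not part of the original source): at DDR3-1066,
 * tck = 1875 ps gives twr = MCEIL(15000, 1875) = 8 clocks, so the MRS0
 * WR field is written as (twr - 4) = 4, which the WR table above maps
 * back to 8 clocks.
 */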

/*
 * Dunit Initialization Complete
 *
 * Indicates that initialization of the Dunit has completed.
 *
 * Memory accesses are permitted and maintenance operation begins.
 * Until this bit is set to a 1, the memory controller will not accept
 * DRAM requests from the MEMORY_MANAGER or HTE.
 */
void set_ddr_init_complete(struct mrc_params *mrc_params)
{
	u32 dco;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~DCO_PMICTL;
	dco |= DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}

/*
 * This function will retrieve relevant timing data
 *
 * This data will be used on subsequent boots to speed up boot times
 * and is required for Suspend To RAM capabilities.
 */
void restore_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;
	const struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
				set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
				set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
				set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, mt->vref[ch][bl]);
				}
			}
			set_wctl(ch, rk, mt->wctl[ch][rk]);
		}
		set_wcmd(ch, mt->wcmd[ch]);
	}
}

/*
 * Configure default settings normally set as part of read training
 *
 * Some defaults have to be set earlier as they may affect earlier
 * training steps.
 */
void default_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				set_rdqs(ch, rk, bl, 24);
				if (rk == 0) {
					/* VREF (RANK0 only) */
					set_vref(ch, bl, 32);
				}
			}
		}
	}
}

/*
 * This function will perform our RCVEN Calibration Algorithm.
 * We will only use the 2xCLK domain timings to perform RCVEN Calibration.
 * All byte lanes will be calibrated "simultaneously" per channel per rank.
 */
void rcvn_cal(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;

#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_RCVN
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

#ifdef BACKUP_RCVN
#else
	uint32_t temp;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	u32 dtr1, dtr1_save;
#endif

	ENTERFN();

	/* rcvn_cal starts */
	mrc_post_code(0x05, 0x00);

#ifndef BACKUP_RCVN
	/* need separate burst to sample DQS preamble */
	dtr1 = msg_port_read(MEM_CTLR, DTR1);
	dtr1_save = dtr1;
	dtr1 |= DTR1_TCCD_12CLK;
	msg_port_write(MEM_CTLR, DTR1, dtr1);
#endif

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform RCVEN Calibration on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * channel and rank being calibrated
					 */
					mrc_post_code(0x05, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_RCVN
					/* set hard-coded timing values */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++)
						set_rcvn(ch, rk, bl, ddr_rcvn[PLATFORM_ID]);
#else
					/* enable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							0, 1 << 8);
					}

					/* initialize the starting delay to 128 PI (cas + 1 CLK) */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* 1x CLK domain timing is cas-4 */
						delay[bl] = (4 + 1) * FULL_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, true);

					/*
					 * Now increase delay by 32 PI (1/4 CLK)
					 * to place in center of high pulse
					 */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}

					/*
					 * Now decrement delay by 128 PI (1 CLK)
					 * until we sample a "0"
					 */
					do {
						temp = sample_dqs(mrc_params, ch, rk, true);
						for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
							if (temp & (1 << bl)) {
								if (delay[bl] >= FULL_CLK) {
									delay[bl] -= FULL_CLK;
									set_rcvn(ch, rk, bl, delay[bl]);
								} else {
									/* not enough delay */
									training_message(ch, rk, bl);
									mrc_post_code(0xee, 0x50);
								}
							}
						}
					} while (temp & 0xff);

#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;

					/*
					 * Finally increment delay by 32 PI (1/4 CLK)
					 * to place in center of preamble
					 */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						/* add "delay[]" values to "final_delay[][]" for rolling average */
						final_delay[ch][bl] += delay[bl];
						/* set timing based on rolling average values */
						set_rcvn(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
					}
#else
					/*
					 * Finally increment delay by 32 PI (1/4 CLK)
					 * to place in center of preamble
					 */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] += QRTR_CLK;
						set_rcvn(ch, rk, bl, delay[bl]);
					}
#endif

					/* disable FIFORST */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl += 2) {
						mrc_alt_write_mask(DDRPHY,
							B01PTRCTL1 +
							(bl >> 1) * DDRIODQ_BL_OFFSET +
							ch * DDRIODQ_CH_OFFSET,
							1 << 8, 1 << 8);
					}
#endif
				}
			}
		}
	}

#ifndef BACKUP_RCVN
	/* restore original */
	msg_port_write(MEM_CTLR, DTR1, dtr1_save);
#endif

	LEAVEFN();
}

/*
 * This function will perform the Write Levelling algorithm
 * (align WCLK and WDQS).
 *
 * This algorithm will act on each rank in each channel separately.
 */
void wr_level(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
#ifndef BACKUP_WDQS
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif
#ifdef BACKUP_WDQS
#else
	/* determines stop condition for CRS_WR_LVL */
	bool all_edges_found;
	/* absolute PI value to be programmed on the byte lane */
	uint32_t delay[NUM_BYTE_LANES];
	uint32_t address;	/* address to be checked during COARSE_WR_LVL */
	u32 dtr4, dtr4_save;
#endif

	ENTERFN();

	/* wr_level starts */
	mrc_post_code(0x06, 0x00);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* loop through each enabled channel */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			/* perform WRITE LEVELING algorithm on a per rank basis */
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					/*
					 * POST_CODE here indicates the current
					 * rank and channel being calibrated
					 */
					mrc_post_code(0x06, 0x10 + ((ch << 4) | rk));

#ifdef BACKUP_WDQS
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						set_wdqs(ch, rk, bl, ddr_wdqs[PLATFORM_ID]);
						set_wdq(ch, rk, bl, ddr_wdqs[PLATFORM_ID] - QRTR_CLK);
					}
#else
					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					/*
					 * enable Write Levelling Mode
					 * (EMRS1 w/ Write Levelling Mode Enable)
					 */
					dram_init_command(DCMD_MRS1(rk, 0x82));

					/*
					 * set ODT DRAM Full Time Termination
					 * disable in MCU
					 */
					dtr4 = msg_port_read(MEM_CTLR, DTR4);
					dtr4_save = dtr4;
					dtr4 |= DTR4_ODTDIS;
					msg_port_write(MEM_CTLR, DTR4, dtr4);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/*
						 * Enable Sandy Bridge Mode (WDQ Tri-State) &
						 * Ensure 5 WDQS pulses during Write Leveling
						 */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x10000154,
							0x100003fc);
					}

					/* Write Leveling Mode enabled in IO */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						1 << 16, 1 << 16);

					/* Initialize the starting delay to WCLK */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/*
						 * CLK0 --> RK0
						 * CLK1 --> RK1
						 */
						delay[bl] = get_wclk(ch, rk);
						set_wdqs(ch, rk, bl, delay[bl]);
					}

					/* now find the rising edge */
					find_rising_edge(mrc_params, delay, ch, rk, false);

					/* disable Write Levelling Mode */
					mrc_alt_write_mask(DDRPHY,
						CCDDR3RESETCTL + DDRIOCCC_CH_OFFSET * ch,
						0, 1 << 16);

					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
						/* Disable Sandy Bridge Mode & Ensure 4 WDQS pulses during normal operation */
						mrc_alt_write_mask(DDRPHY,
							DQCTL + DDRIODQ_BL_OFFSET * bl + DDRIODQ_CH_OFFSET * ch,
							0x00000154,
							0x100003fc);
					}

					/* restore original DTR4 */
					msg_port_write(MEM_CTLR, DTR4, dtr4_save);

					/*
					 * restore original value
					 * (Write Levelling Mode Disable)
					 */
					dram_init_command(DCMD_MRS1(rk, mrc_params->mrs1));

					/*
					 * perform a single PRECHARGE_ALL command to
					 * make DRAM state machine go to IDLE state
					 */
					dram_init_command(DCMD_PREA(rk));

					mrc_post_code(0x06, 0x30 + ((ch << 4) | rk));

					/*
					 * COARSE WRITE LEVEL:
					 * check that we're on the correct clock edge
					 */

					/* hte reconfiguration request */
					mrc_params->hte_setup = 1;

					/* start CRS_WR_LVL with WDQS = WDQS + 128 PI */
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						delay[bl] = get_wdqs(ch, rk, bl) + FULL_CLK;
						set_wdqs(ch, rk, bl, delay[bl]);
						/*
						 * program WDQ timings based on WDQS
						 * (WDQ = WDQS - 32 PI)
						 */
						set_wdq(ch, rk, bl, (delay[bl] - QRTR_CLK));
					}

					/* get an address in the targeted channel/rank */
					address = get_addr(ch, rk);

					do {
						uint32_t coarse_result = 0x00;
						uint32_t coarse_result_mask = byte_lane_mask(mrc_params);
						/* assume pass */
						all_edges_found = true;

						mrc_params->hte_setup = 1;
						coarse_result = check_rw_coarse(mrc_params, address);

						/* check for failures and margin the byte lane back 128 PI (1 CLK) */
						for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
							if (coarse_result & (coarse_result_mask << bl)) {
								all_edges_found = false;
								delay[bl] -= FULL_CLK;
								set_wdqs(ch, rk, bl, delay[bl]);
								/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
								set_wdq(ch, rk, bl, delay[bl] - QRTR_CLK);
							}
						}
					} while (!all_edges_found);
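					/*
					 * Worked example (values assumed for
					 * illustration): if write leveling left
					 * WDQS at 250 PI, the coarse search
					 * starts at 250 + 128 = 378 PI with
					 * WDQ = 378 - 32 = 346 PI. A byte lane
					 * that fails check_rw_coarse() is
					 * marched back one full clock to 250 PI
					 * (WDQ = 218 PI), and the loop repeats
					 * until every lane passes, i.e. sits on
					 * the correct clock edge.
					 */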
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
					/* accumulate "final_delay[][]" values from "delay[]" values for rolling average */
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						final_delay[ch][bl] += delay[bl];
						set_wdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
						/* program WDQ timings based on WDQS (WDQ = WDQS - 32 PI) */
						set_wdq(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled - QRTR_CLK);
					}
#endif
#endif
				}
			}
		}
	}

	LEAVEFN();
}
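
/*
 * Configure the DPMC0 page-management fields: a page-close idle timeout
 * (the 4 written to the PCLSTO field below) and precharge-all power-down
 * enable. The exact timeout encoding is hardware-specific; the field names
 * follow the DPMC0_* masks used below.
 */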
void prog_page_ctrl(struct mrc_params *mrc_params)
{
	u32 dpmc0;

	ENTERFN();

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= (4 << 16);
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	LEAVEFN();
}

/*
 * This function will perform the READ TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the VREF and RDQS (and eventually RDQ) values
 * to achieve maximum READ margins. The algorithm will first determine the
 * X coordinate (RDQS setting). This is done by collapsing the VREF eye
 * until we find a minimum required RDQS eye for VREF_MIN and VREF_MAX.
 * Then we take the averages of the RDQS eye at VREF_MIN and VREF_MAX,
 * then average those; this will be the final X coordinate. The algorithm
 * will then determine the Y coordinate (VREF setting). This is done by
 * collapsing the RDQS eye until we find a minimum required VREF eye for
 * RDQS_MIN and RDQS_MAX. Then we take the averages of the VREF eye at
 * RDQS_MIN and RDQS_MAX, then average those; this will be the final Y
 * coordinate.
 *
 * NOTE: this algorithm assumes the eye curves have a one-to-one
 * relationship, meaning for each X the curve has only one Y and vice versa.
 */
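
/*
 * Worked example of the centering math (values assumed for illustration):
 * if the passing RDQS edges are L/T=20, R/T=44, L/B=16, R/B=40, then the
 * TOP average is (44 + 20) / 2 = 32, the BOTTOM average is (40 + 16) / 2
 * = 28, and the final X coordinate is (32 + 28) / 2 = 30. The final Y
 * (VREF) coordinate is computed the same way from the RIGHT and LEFT side
 * averages.
 */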
void rd_train(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef BACKUP_RDQS
#else
	uint8_t side_x;	/* tracks LEFT/RIGHT approach vectors */
	uint8_t side_y;	/* tracks BOTTOM/TOP approach vectors */
	/* X coordinate data (passing RDQS values) for approach vectors */
	uint8_t x_coordinate[2][2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* Y coordinate data (passing VREF values) for approach vectors */
	uint8_t y_coordinate[2][2][NUM_CHANNELS][NUM_BYTE_LANES];
	/* centered X (RDQS) */
	uint8_t x_center[NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	/* centered Y (VREF) */
	uint8_t y_center[NUM_CHANNELS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

	/* rd_train starts */
	mrc_post_code(0x07, 0x00);

	ENTERFN();

#ifdef BACKUP_RDQS
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						set_rdqs(ch, rk, bl, ddr_rdqs[PLATFORM_ID]);
					}
				}
			}
		}
	}
#else
	/* initialize x/y_coordinate arrays */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						/* x_coordinate */
						x_coordinate[L][B][ch][rk][bl] = RDQS_MIN;
						x_coordinate[R][B][ch][rk][bl] = RDQS_MAX;
						x_coordinate[L][T][ch][rk][bl] = RDQS_MIN;
						x_coordinate[R][T][ch][rk][bl] = RDQS_MAX;
						/* y_coordinate */
						y_coordinate[L][B][ch][bl] = VREF_MIN;
						y_coordinate[R][B][ch][bl] = VREF_MIN;
						y_coordinate[L][T][ch][bl] = VREF_MAX;
						y_coordinate[R][T][ch][bl] = VREF_MAX;
					}
				}
			}
		}
	}

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/* look for passing coordinates */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x10 + side_y * 2 + side_x);

			/* find passing values */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (0x1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables &
							(0x1 << rk)) {
							/* set x/y_coordinate search starting settings */
							for (bl = 0;
							     bl < NUM_BYTE_LANES / bl_divisor;
							     bl++) {
								set_rdqs(ch, rk, bl,
									x_coordinate[side_x][side_y][ch][rk][bl]);
								set_vref(ch, bl,
									y_coordinate[side_x][side_y][ch][bl]);
							}

							/* get an address in the target channel/rank */
							address = get_addr(ch, rk);

							/* request HTE reconfiguration */
							mrc_params->hte_setup = 1;

							/* test the settings */
							do {
								/* result[07:00] == failing byte lane (MAX 8) */
								result = check_bls_ex(mrc_params, address);

								/* check for failures */
								if (result & 0xff) {
									/* at least 1 byte lane failed */
									for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
										if (result &
											(bl_mask << bl)) {
											/* adjust the RDQS values accordingly */
											if (side_x == L)
												x_coordinate[L][side_y][ch][rk][bl] += RDQS_STEP;
											else
												x_coordinate[R][side_y][ch][rk][bl] -= RDQS_STEP;

											/* check that we haven't closed the RDQS_EYE too much */
											if ((x_coordinate[L][side_y][ch][rk][bl] > (RDQS_MAX - MIN_RDQS_EYE)) ||
											    (x_coordinate[R][side_y][ch][rk][bl] < (RDQS_MIN + MIN_RDQS_EYE)) ||
											    (x_coordinate[L][side_y][ch][rk][bl] ==
											     x_coordinate[R][side_y][ch][rk][bl])) {
												/*
												 * not enough RDQS margin available at this VREF
												 * update VREF values accordingly
												 */
												if (side_y == B)
													y_coordinate[side_x][B][ch][bl] += VREF_STEP;
												else
													y_coordinate[side_x][T][ch][bl] -= VREF_STEP;

												/* check that we haven't closed the VREF_EYE too much */
												if ((y_coordinate[side_x][B][ch][bl] > (VREF_MAX - MIN_VREF_EYE)) ||
												    (y_coordinate[side_x][T][ch][bl] < (VREF_MIN + MIN_VREF_EYE)) ||
												    (y_coordinate[side_x][B][ch][bl] == y_coordinate[side_x][T][ch][bl])) {
													/* VREF_EYE collapsed below MIN_VREF_EYE */
													training_message(ch, rk, bl);
													mrc_post_code(0xee, 0x70 + side_y * 2 + side_x);
												} else {
													/* update the VREF setting */
													set_vref(ch, bl, y_coordinate[side_x][side_y][ch][bl]);
													/* reset the X coordinate to begin the search at the new VREF */
													x_coordinate[side_x][side_y][ch][rk][bl] =
														(side_x == L) ? RDQS_MIN : RDQS_MAX;
												}
											}

											/* update the RDQS setting */
											set_rdqs(ch, rk, bl, x_coordinate[side_x][side_y][ch][rk][bl]);
										}
									}
								}
							} while (result & 0xff);
						}
					}
				}
			}
		}
	}

	mrc_post_code(0x07, 0x20);

	/* find final RDQS (X coordinate) & final VREF (Y coordinate) */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						uint32_t temp1;
						uint32_t temp2;

						/* x_coordinate */
						DPF(D_INFO,
						    "RDQS T/B eye rank%d lane%d : %d-%d %d-%d\n",
						    rk, bl,
						    x_coordinate[L][T][ch][rk][bl],
						    x_coordinate[R][T][ch][rk][bl],
						    x_coordinate[L][B][ch][rk][bl],
						    x_coordinate[R][B][ch][rk][bl]);

						/* average the TOP side LEFT & RIGHT values */
						temp1 = (x_coordinate[R][T][ch][rk][bl] + x_coordinate[L][T][ch][rk][bl]) / 2;
						/* average the BOTTOM side LEFT & RIGHT values */
						temp2 = (x_coordinate[R][B][ch][rk][bl] + x_coordinate[L][B][ch][rk][bl]) / 2;
						/* average the above averages */
						x_center[ch][rk][bl] = (uint8_t)((temp1 + temp2) / 2);

						/* y_coordinate */
						DPF(D_INFO,
						    "VREF R/L eye lane%d : %d-%d %d-%d\n",
						    bl,
						    y_coordinate[R][B][ch][bl],
						    y_coordinate[R][T][ch][bl],
						    y_coordinate[L][B][ch][bl],
						    y_coordinate[L][T][ch][bl]);

						/* average the RIGHT side TOP & BOTTOM values */
						temp1 = (y_coordinate[R][T][ch][bl] + y_coordinate[R][B][ch][bl]) / 2;
						/* average the LEFT side TOP & BOTTOM values */
						temp2 = (y_coordinate[L][T][ch][bl] + y_coordinate[L][B][ch][bl]) / 2;
						/* average the above averages */
						y_center[ch][bl] = (uint8_t)((temp1 + temp2) / 2);
					}
				}
			}
		}
	}

#ifdef RX_EYE_CHECK
	/* perform an eye check */
	for (side_y = B; side_y <= T; side_y++) {
		for (side_x = L; side_x <= R; side_x++) {
			mrc_post_code(0x07, 0x30 + side_y * 2 + side_x);

			/* update the settings for the eye check */
			for (ch = 0; ch < NUM_CHANNELS; ch++) {
				if (mrc_params->channel_enables & (1 << ch)) {
					for (rk = 0; rk < NUM_RANKS; rk++) {
						if (mrc_params->rank_enables & (1 << rk)) {
							for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
								if (side_x == L)
									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] - (MIN_RDQS_EYE / 2));
								else
									set_rdqs(ch, rk, bl, x_center[ch][rk][bl] + (MIN_RDQS_EYE / 2));

								if (side_y == B)
									set_vref(ch, bl, y_center[ch][bl] - (MIN_VREF_EYE / 2));
								else
									set_vref(ch, bl, y_center[ch][bl] + (MIN_VREF_EYE / 2));
							}
						}
					}
				}
			}

			/* request HTE reconfiguration */
			mrc_params->hte_setup = 1;

			/* check the eye */
			if (check_bls_ex(mrc_params, address) & 0xff) {
				/* one or more byte lanes failed */
				mrc_post_code(0xee, 0x74 + side_x * 2 + side_y);
			}
		}
	}
#endif

	mrc_post_code(0x07, 0x40);

	/* set final placements */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
#endif
					for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor); bl++) {
						/* x_coordinate */
#ifdef R2R_SHARING
						final_delay[ch][bl] += x_center[ch][rk][bl];
						set_rdqs(ch, rk, bl, final_delay[ch][bl] / num_ranks_enabled);
#else
						set_rdqs(ch, rk, bl, x_center[ch][rk][bl]);
#endif
						/* y_coordinate */
						set_vref(ch, bl, y_center[ch][bl]);
					}
				}
			}
		}
	}
#endif

	LEAVEFN();
}

/*
 * This function will perform the WRITE TRAINING Algorithm on all
 * channels/ranks/byte_lanes simultaneously to minimize execution time.
 *
 * The idea here is to train the WDQ timings to achieve maximum WRITE margins.
 * The algorithm will start with WDQ at the current WDQ setting (tracks WDQS
 * in WR_LVL) +/- 32 PIs (+/- 1/4 CLK) and collapse the eye until all data
 * patterns pass. This is because WDQS will be aligned to WCLK by the
 * Write Leveling algorithm and WDQ will only ever have a 1/2 CLK window
 * of validity.
 */
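
/*
 * Worked example (values assumed for illustration): with WDQS at 300 PI,
 * WDQ starts from 300 - 32 = 268 PI, so the initial search window is
 * [268 - 32, 268 + 32] = [236, 300] PI. Each failing byte lane moves its
 * LEFT edge up or its RIGHT edge down by WDQ_STEP until check_bls_ex()
 * passes on both sides; the final WDQ is the midpoint of the surviving
 * window, e.g. (244 + 292) / 2 = 268 PI.
 */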
void wr_train(struct mrc_params *mrc_params)
{
	uint8_t ch;	/* channel counter */
	uint8_t rk;	/* rank counter */
	uint8_t bl;	/* byte lane counter */
	uint8_t bl_divisor = (mrc_params->channel_width == X16) ? 2 : 1;
#ifdef BACKUP_WDQ
#else
	uint8_t side;	/* LEFT/RIGHT side indicator (0=L, 1=R) */
	uint32_t temp;	/* temporary DWORD */
	/* 2 arrays, for L & R side passing delays */
	uint32_t delay[2][NUM_CHANNELS][NUM_RANKS][NUM_BYTE_LANES];
	uint32_t address;	/* target address for check_bls_ex() */
	uint32_t result;	/* result of check_bls_ex() */
	uint32_t bl_mask;	/* byte lane mask for result checking */
#ifdef R2R_SHARING
	/* used to find placement for rank2rank sharing configs */
	uint32_t final_delay[NUM_CHANNELS][NUM_BYTE_LANES];
	/* used to find placement for rank2rank sharing configs */
	uint32_t num_ranks_enabled = 0;
#endif
#endif

	/* wr_train starts */
	mrc_post_code(0x08, 0x00);

	ENTERFN();

#ifdef BACKUP_WDQ
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						set_wdq(ch, rk, bl, ddr_wdq[PLATFORM_ID]);
					}
				}
			}
		}
	}
#else
	/* initialize "delay" */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
					for (bl = 0;
					     bl < NUM_BYTE_LANES / bl_divisor;
					     bl++) {
						/*
						 * want to start with
						 * WDQ = (WDQS - QRTR_CLK)
						 * +/- QRTR_CLK
						 */
						temp = get_wdqs(ch, rk, bl) - QRTR_CLK;
						delay[L][ch][rk][bl] = temp - QRTR_CLK;
						delay[R][ch][rk][bl] = temp + QRTR_CLK;
					}
				}
			}
		}
	}

	/* initialize other variables */
	bl_mask = byte_lane_mask(mrc_params);
	address = get_addr(0, 0);

#ifdef R2R_SHARING
	/* need to set "final_delay[][]" elements to "0" */
	memset((void *)(final_delay), 0x00, (size_t)sizeof(final_delay));
#endif

	/*
	 * start algorithm on the LEFT side and train each channel/bl
	 * until no failures are observed, then repeat for the RIGHT side.
	 */
	for (side = L; side <= R; side++) {
		mrc_post_code(0x08, 0x10 + side);

		/* set starting values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables &
						(1 << rk)) {
						for (bl = 0;
						     bl < NUM_BYTE_LANES / bl_divisor;
						     bl++) {
							set_wdq(ch, rk, bl, delay[side][ch][rk][bl]);
						}
					}
				}
			}
		}

		/* find passing values */
		for (ch = 0; ch < NUM_CHANNELS; ch++) {
			if (mrc_params->channel_enables & (1 << ch)) {
				for (rk = 0; rk < NUM_RANKS; rk++) {
					if (mrc_params->rank_enables &
						(1 << rk)) {
						/* get an address in the target channel/rank */
						address = get_addr(ch, rk);

						/* request HTE reconfiguration */
						mrc_params->hte_setup = 1;

						/* check the settings */
						do {
							/* result[07:00] == failing byte lane (MAX 8) */
							result = check_bls_ex(mrc_params, address);

							/* check for failures */
							if (result & 0xff) {
								/* at least 1 byte lane failed */
								for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
									if (result &
										(bl_mask << bl)) {
										if (side == L)
											delay[L][ch][rk][bl] += WDQ_STEP;
										else
											delay[R][ch][rk][bl] -= WDQ_STEP;

										/* check for algorithm failure */
										if (delay[L][ch][rk][bl] != delay[R][ch][rk][bl]) {
											/*
											 * margin available
											 * update delay setting
											 */
											set_wdq(ch, rk, bl,
												delay[side][ch][rk][bl]);
										} else {
											/*
											 * no margin available
											 * notify the user and halt
											 */
											training_message(ch, rk, bl);
											mrc_post_code(0xee, 0x80 + side);
										}
									}
								}
							}
						/* stop when all byte lanes pass */
						} while (result & 0xff);
					}
				}
			}
		}
	}

	/* program WDQ to the middle of passing window */
	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		if (mrc_params->channel_enables & (1 << ch)) {
			for (rk = 0; rk < NUM_RANKS; rk++) {
				if (mrc_params->rank_enables & (1 << rk)) {
#ifdef R2R_SHARING
					/* increment "num_ranks_enabled" */
					num_ranks_enabled++;
#endif
					for (bl = 0; bl < NUM_BYTE_LANES / bl_divisor; bl++) {
						DPF(D_INFO,
						    "WDQ eye rank%d lane%d : %d-%d\n",
						    rk, bl,
						    delay[L][ch][rk][bl],
						    delay[R][ch][rk][bl]);

						temp = (delay[R][ch][rk][bl] + delay[L][ch][rk][bl]) / 2;
#ifdef R2R_SHARING
						final_delay[ch][bl] += temp;
						set_wdq(ch, rk, bl,
							final_delay[ch][bl] / num_ranks_enabled);
#else
						set_wdq(ch, rk, bl, temp);
#endif
					}
				}
			}
		}
	}
#endif

	LEAVEFN();
}

/*
 * This function will store relevant timing data
 *
 * This data will be used on subsequent boots to speed up boot times
 * and is required for Suspend To RAM capabilities.
 */
void store_timings(struct mrc_params *mrc_params)
{
	uint8_t ch, rk, bl;
	struct mrc_timings *mt = &mrc_params->timings;

	for (ch = 0; ch < NUM_CHANNELS; ch++) {
		for (rk = 0; rk < NUM_RANKS; rk++) {
			for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
				mt->rcvn[ch][rk][bl] = get_rcvn(ch, rk, bl);
				mt->rdqs[ch][rk][bl] = get_rdqs(ch, rk, bl);
				mt->wdqs[ch][rk][bl] = get_wdqs(ch, rk, bl);
				mt->wdq[ch][rk][bl] = get_wdq(ch, rk, bl);

				if (rk == 0)
					mt->vref[ch][bl] = get_vref(ch, bl);
			}

			mt->wctl[ch][rk] = get_wctl(ch, rk);
		}

		mt->wcmd[ch] = get_wcmd(ch);
	}

	/* need to save in case the frequency is changed after a warm reset */
	mt->ddr_speed = mrc_params->ddr_speed;
}
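
/*
 * Illustrative sketch (not part of this file's flow): on a warm or S3 boot
 * the saved timings could be re-applied with the setters used above. The
 * helper name restore_timings() is hypothetical; the MRC's real restore
 * path may differ.
 *
 *	static void restore_timings(struct mrc_params *mrc_params)
 *	{
 *		uint8_t ch, rk, bl;
 *		const struct mrc_timings *mt = &mrc_params->timings;
 *
 *		for (ch = 0; ch < NUM_CHANNELS; ch++)
 *			for (rk = 0; rk < NUM_RANKS; rk++)
 *				for (bl = 0; bl < NUM_BYTE_LANES; bl++) {
 *					set_rcvn(ch, rk, bl, mt->rcvn[ch][rk][bl]);
 *					set_rdqs(ch, rk, bl, mt->rdqs[ch][rk][bl]);
 *					set_wdqs(ch, rk, bl, mt->wdqs[ch][rk][bl]);
 *					set_wdq(ch, rk, bl, mt->wdq[ch][rk][bl]);
 *					if (rk == 0)
 *						set_vref(ch, bl, mt->vref[ch][bl]);
 *				}
 *	}
 */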

/*
 * This function ensures the SEC comes out of reset, and that the IA
 * initiates the SEC, enabling Memory Scrambling.
 */
void enable_scrambling(struct mrc_params *mrc_params)
{
	uint32_t lfsr = 0;
	uint8_t i;

	if (mrc_params->scrambling_enables == 0)
		return;

	ENTERFN();

	/* 32-bit seed is always stored in BIOS NVM */
	lfsr = mrc_params->timings.scrambler_seed;

	if (mrc_params->boot_mode == BM_COLD) {
		/*
		 * the factory value is 0; on the first boot
		 * a clock-based seed is loaded.
		 */
		if (lfsr == 0) {
			/*
			 * get seed from system clock
			 * and make sure it is not all 1's
			 */
			lfsr = rdtsc() & 0x0fffffff;
		} else {
			/*
			 * Need to replace the scrambler:
			 * advance the 32-bit LFSR 16 times to move
			 * past the last part of the previous
			 * scrambler vector
			 */
			for (i = 0; i < 16; i++)
				lfsr32(&lfsr);
		}

		/* save new seed */
		mrc_params->timings.scrambler_seed = lfsr;
	}

	/*
	 * In warm boot or S3 exit, we have the previous seed.
	 * In cold boot, we have the last 32-bit LFSR value, which is the new seed.
	 */
	lfsr32(&lfsr);	/* shift to next value */
	msg_port_write(MEM_CTLR, SCRMSEED, (lfsr & 0x0003ffff));

	for (i = 0; i < 2; i++)
		msg_port_write(MEM_CTLR, SCRMLO + i, (lfsr & 0xaaaaaaaa));

	LEAVEFN();
}
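
/*
 * Seed lifecycle, worked through (values assumed for illustration): on the
 * very first cold boot the stored seed is the factory 0, so a TSC-derived
 * seed such as 0x0a1b2c3d is taken and saved to NVM. On a later cold boot
 * the saved seed is advanced through lfsr32() 16 times before being saved
 * again, so successive cold boots never reuse a scrambler vector. Only the
 * low 18 bits (mask 0x0003ffff) of the next LFSR value are programmed into
 * SCRMSEED.
 */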

/*
 * Configure MCU Power Management Control Register
 * and Scheduler Control Register
 */
void prog_ddr_control(struct mrc_params *mrc_params)
{
	u32 dsch;
	u32 dpmc0;

	ENTERFN();

	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch &= ~(DSCH_OOODIS | DSCH_OOOST3DIS | DSCH_NEWBYPDIS);
	msg_port_write(MEM_CTLR, DSCH, dsch);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 &= ~DPMC0_DISPWRDN;
	dpmc0 |= (mrc_params->power_down_disable << 25);
	dpmc0 &= ~DPMC0_CLKGTDIS;
	dpmc0 &= ~DPMC0_PCLSTO_MASK;
	dpmc0 |= (4 << 16);
	dpmc0 |= DPMC0_PREAPWDEN;
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);
	/*
	 * CMDTRIST = 2h (value 0x20 under mask 0x30, i.e. DPMC1 bits [5:4]):
	 * CMD/ADDR are tristated when there is no valid command
	 */
	mrc_write_mask(MEM_CTLR, DPMC1, 0x20, 0x30);

	LEAVEFN();
}

/*
 * After training complete configure MCU Rank Population Register
 * specifying: ranks enabled, device width, density, address mode
 */
void prog_dra_drb(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 dco;
	u8 density = mrc_params->params.density;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	drp = 0;
	if (mrc_params->rank_enables & 1)
		drp |= DRP_RKEN0;
	if (mrc_params->rank_enables & 2)
		drp |= DRP_RKEN1;

	if (mrc_params->dram_width == X16) {
		drp |= (1 << 4);
		drp |= (1 << 9);
	}
	/*
	 * Density encoding in struct dram_params: 0=512Mb, 1=1Gb, 2=2Gb, 3=4Gb
	 * has to be mapped to the RANKDENSx encoding (0=1Gb)
	 */
	if (density == 0)
		density = 4;

	drp |= ((density - 1) << 6);
	drp |= ((density - 1) << 11);

	/* Address mode can be overwritten if ECC enabled */
	drp |= (mrc_params->address_mode << 14);

	msg_port_write(MEM_CTLR, DRP, drp);

	dco &= ~DCO_PMICTL;
	dco |= DCO_IC;
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}
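
/*
 * Worked example of the density mapping above (values assumed for
 * illustration): a 2Gb part arrives as density == 2 and programs
 * RANKDENSx = 2 - 1 = 1 into the two rank-density fields (shifts 6 and
 * 11); a 512Mb part arrives as density == 0, is first remapped to 4, and
 * so programs the encoding 3.
 */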

/* Send DRAM wake command */
void perform_wake(struct mrc_params *mrc_params)
{
	ENTERFN();

	dram_wake_command();

	LEAVEFN();
}

/*
 * Configure refresh rate and short ZQ calibration interval
 * Activate dynamic self refresh
 */
void change_refresh_period(struct mrc_params *mrc_params)
{
	u32 drfc;
	u32 dcal;
	u32 dpmc0;

	ENTERFN();

	drfc = msg_port_read(MEM_CTLR, DRFC);
	drfc &= ~DRFC_TREFI_MASK;
	drfc |= (mrc_params->refresh_rate << 12);
	drfc |= DRFC_REFDBTCLR;
	msg_port_write(MEM_CTLR, DRFC, drfc);

	dcal = msg_port_read(MEM_CTLR, DCAL);
	dcal &= ~DCAL_ZQCINT_MASK;
	dcal |= (3 << 8);	/* 63ms */
	msg_port_write(MEM_CTLR, DCAL, dcal);

	dpmc0 = msg_port_read(MEM_CTLR, DPMC0);
	dpmc0 |= (DPMC0_DYNSREN | DPMC0_ENPHYCLKGATE);
	msg_port_write(MEM_CTLR, DPMC0, dpmc0);

	LEAVEFN();
}
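
/*
 * Field placement, spelled out: the refresh rate is shifted by 12 into the
 * DRFC tREFI field (cleared via DRFC_TREFI_MASK first), and the ZQ
 * short-calibration interval is shifted by 8 into the DCAL ZQCINT field,
 * where (per the comment above) the encoding 3 selects a 63 ms interval.
 */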

/*
 * Configure DDRPHY for Auto-Refresh, Periodic Compensations,
 * Dynamic Diff-Amp, ZQSPERIOD, Auto-Precharge, CKE Power-Down
 */
void set_auto_refresh(struct mrc_params *mrc_params)
{
	uint32_t channel;
	uint32_t rank;
	uint32_t bl;
	uint32_t bl_divisor = 1;
	uint32_t temp;

	ENTERFN();

	/*
	 * Enable Auto-Refresh, Periodic Compensations, Dynamic Diff-Amp,
	 * ZQSPERIOD, Auto-Precharge, CKE Power-Down
	 */
	for (channel = 0; channel < NUM_CHANNELS; channel++) {
		if (mrc_params->channel_enables & (1 << channel)) {
			/* Enable Periodic RCOMPS */
			mrc_alt_write_mask(DDRPHY, CMPCTRL, 2, 2);

			/* Enable Dynamic DiffAmp & Set Read ODT Value */
			switch (mrc_params->rd_odt_value) {
			case 0:
				temp = 0x3f;	/* OFF */
				break;
			default:
				temp = 0x00;	/* Auto */
				break;
			}

			for (bl = 0; bl < (NUM_BYTE_LANES / bl_divisor) / 2; bl++) {
				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B0OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,
					temp << 10,
					0x003ffc00);

				/* Override: DIFFAMP, ODT */
				mrc_alt_write_mask(DDRPHY,
					B1OVRCTL + bl * DDRIODQ_BL_OFFSET +
					channel * DDRIODQ_CH_OFFSET,
					temp << 10,
					0x003ffc00);
			}

			/* Issue ZQCS command */
			for (rank = 0; rank < NUM_RANKS; rank++) {
				if (mrc_params->rank_enables & (1 << rank))
					dram_init_command(DCMD_ZQCS(rank));
			}
		}
	}

	clear_pointers();

	LEAVEFN();
}
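
/*
 * Bit arithmetic of the override writes above, spelled out: the mask
 * 0x003ffc00 selects bits [21:10] of BxOVRCTL, and temp << 10 places the
 * override value at the bottom of that field, so rd_odt_value == 0 writes
 * 0x3f (ODT off) and any other value writes 0x00 (automatic) into the
 * DIFFAMP/ODT override bits.
 */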

/*
 * Enable ECC support, depending on configuration
 *
 * The available memory size is decreased, and memory is rewritten with 0s
 * in order to clear the error status. Address mode 2 is forced.
 */
void ecc_enable(struct mrc_params *mrc_params)
{
	u32 drp;
	u32 dsch;
	u32 ecc_ctrl;

	if (mrc_params->ecc_enables == 0)
		return;

	ENTERFN();

	/* Configuration required in ECC mode */
	drp = msg_port_read(MEM_CTLR, DRP);
	drp &= ~DRP_ADDRMAP_MASK;
	drp |= DRP_ADDRMAP_MAP1;
	drp |= DRP_PRI64BSPLITEN;
	msg_port_write(MEM_CTLR, DRP, drp);

	/* Disable new request bypass */
	dsch = msg_port_read(MEM_CTLR, DSCH);
	dsch |= DSCH_NEWBYPDIS;
	msg_port_write(MEM_CTLR, DSCH, dsch);

	/* Enable ECC */
	ecc_ctrl = (DECCCTRL_SBEEN | DECCCTRL_DBEEN | DECCCTRL_ENCBGEN);
	msg_port_write(MEM_CTLR, DECCCTRL, ecc_ctrl);

	/* Assume 8-bank memory; one bank is lost to ECC */
	mrc_params->mem_size -= mrc_params->mem_size / 8;

	/* For S3 resume memory content has to be preserved */
	if (mrc_params->boot_mode != BM_S3) {
		select_hte();
		hte_mem_init(mrc_params, MRC_MEM_INIT);
		select_mem_mgr();
	}

	LEAVEFN();
}
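
/*
 * Worked example of the size adjustment above (value assumed for
 * illustration): with 1024 MiB populated, one of the eight banks is
 * reserved for ECC codes, so the reported size becomes
 * 1024 - 1024 / 8 = 896 MiB.
 */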

/*
 * Execute memory test; if an error is detected, it is
 * indicated in mrc_params->status
 */
void memory_test(struct mrc_params *mrc_params)
{
	uint32_t result = 0;

	ENTERFN();

	select_hte();
	result = hte_mem_init(mrc_params, MRC_MEM_TEST);
	select_mem_mgr();

	DPF(D_INFO, "Memory test result %x\n", result);
	mrc_params->status = ((result == 0) ? MRC_SUCCESS : MRC_E_MEMTEST);
	LEAVEFN();
}

/* Lock MCU registers at the end of initialization sequence */
void lock_registers(struct mrc_params *mrc_params)
{
	u32 dco;

	ENTERFN();

	dco = msg_port_read(MEM_CTLR, DCO);
	dco &= ~(DCO_PMICTL | DCO_PMIDIS);
	dco |= (DCO_DRPLOCK | DCO_CPGCLOCK);
	msg_port_write(MEM_CTLR, DCO, dco);

	LEAVEFN();
}