ddr.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562
  1. /*
  2. * Copyright (C) 2014 Gateworks Corporation
  3. * Author: Tim Harvey <tharvey@gateworks.com>
  4. *
  5. * SPDX-License-Identifier: GPL-2.0+
  6. */
  7. #include <common.h>
  8. #include <linux/types.h>
  9. #include <asm/arch/clock.h>
  10. #include <asm/arch/mx6-ddr.h>
  11. #include <asm/arch/sys_proto.h>
  12. #include <asm/io.h>
  13. #include <asm/types.h>
  14. #include <wait_bit.h>
  15. #if defined(CONFIG_MX6_DDRCAL)
  16. static void reset_read_data_fifos(void)
  17. {
  18. struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
  19. /* Reset data FIFOs twice. */
  20. setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
  21. wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
  22. setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
  23. wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
  24. }
  25. static void precharge_all(const bool cs0_enable, const bool cs1_enable)
  26. {
  27. struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
  28. /*
  29. * Issue the Precharge-All command to the DDR device for both
  30. * chip selects. Note, CON_REQ bit should also remain set. If
  31. * only using one chip select, then precharge only the desired
  32. * chip select.
  33. */
  34. if (cs0_enable) { /* CS0 */
  35. writel(0x04008050, &mmdc0->mdscr);
  36. wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
  37. }
  38. if (cs1_enable) { /* CS1 */
  39. writel(0x04008058, &mmdc0->mdscr);
  40. wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
  41. }
  42. }
  43. static void force_delay_measurement(int bus_size)
  44. {
  45. struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
  46. struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
  47. writel(0x800, &mmdc0->mpmur0);
  48. if (bus_size == 0x2)
  49. writel(0x800, &mmdc1->mpmur0);
  50. }
  51. static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl)
  52. {
  53. u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl;
  54. /*
  55. * DQS gating absolute offset should be modified from reflecting
  56. * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80)
  57. */
  58. val_ctrl = readl(reg_ctrl);
  59. val_ctrl &= 0xf0000000;
  60. dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0;
  61. dg_dl_abs_offset = dg_tmp_val & 0x7f;
  62. dg_hc_del = (dg_tmp_val & 0x780) << 1;
  63. val_ctrl |= dg_dl_abs_offset + dg_hc_del;
  64. dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0;
  65. dg_dl_abs_offset = dg_tmp_val & 0x7f;
  66. dg_hc_del = (dg_tmp_val & 0x780) << 1;
  67. val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16;
  68. writel(val_ctrl, reg_ctrl);
  69. }
  70. static void correct_mpwldectr_result(void *reg)
  71. {
  72. /* Limit is 200/256 of CK, which is WL_HC_DELx | 0x48. */
  73. const unsigned int limit = 0x148;
  74. u32 val = readl(reg);
  75. u32 old = val;
  76. if ((val & 0x17f) > limit)
  77. val &= 0xffff << 16;
  78. if (((val >> 16) & 0x17f) > limit)
  79. val &= 0xffff;
  80. if (old != val)
  81. writel(val, reg);
  82. }
/*
 * Run the MMDC automatic write-leveling calibration sequence.
 *
 * @sysinfo: board DDR description; only ->dsize is consulted here
 *           (dsize == 2 means a 64-bit bus, so PHY1 is calibrated too).
 *
 * Returns an error bitmask (0 on success):
 *   bit 0 - PHY0 reported write-leveling errors
 *   bit 1 - PHY1 reported write-leveling errors
 *   bit 2 - calibration soft-failed (all results 0x001F001F) and the
 *           pre-calibration delay values were restored
 *
 * The numbered step comments follow the calibration procedure in the
 * i.MX6 reference manual's MMDC chapter.
 */
int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	u32 esdmisc_val, zq_val;
	u32 errors = 0;
	u32 ldectrl[4] = {0};
	/* MR1 value written back when exiting write-leveling mode */
	u32 ddr_mr1 = 0x4;
	u32 rwalat_max;

	/*
	 * Stash old values in case calibration fails,
	 * we need to restore them
	 */
	ldectrl[0] = readl(&mmdc0->mpwldectrl0);
	ldectrl[1] = readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		ldectrl[2] = readl(&mmdc1->mpwldectrl0);
		ldectrl[3] = readl(&mmdc1->mpwldectrl1);
	}

	/* disable DDR logic power down timer */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer */
	setbits_le32(&mmdc0->mapsr, 0x1);

	debug("Starting write leveling calibration.\n");

	/*
	 * 2. disable auto refresh and ZQ calibration
	 * before proceeding with Write Leveling calibration
	 */
	esdmisc_val = readl(&mmdc0->mdref);
	writel(0x0000C000, &mmdc0->mdref);
	zq_val = readl(&mmdc0->mpzqhwctrl);
	writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl);

	/* 3. increase walat and ralat to maximum */
	rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17);
	setbits_le32(&mmdc0->mdmisc, rwalat_max);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mdmisc, rwalat_max);

	/*
	 * 4 & 5. Configure the external DDR device to enter write-leveling
	 * mode through Load Mode Register command.
	 * Register setting:
	 * Bits[31:16] MR1 value (0x0080 write leveling enable)
	 * Bit[9] set WL_EN to enable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
	writel(0x00808231, &mmdc0->mdscr);

	/* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */
	writel(0x00000001, &mmdc0->mpwlgcr);

	/*
	 * 7. Upon completion of this process the MMDC de-asserts
	 * the MPWLGCR[HW_WL_EN]
	 */
	wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0);

	/*
	 * 8. check for any errors: check both PHYs for x64 configuration,
	 * if x32, check only PHY0
	 */
	if (readl(&mmdc0->mpwlgcr) & 0x00000F00)
		errors |= 1;
	if (sysinfo->dsize == 2)
		if (readl(&mmdc1->mpwlgcr) & 0x00000F00)
			errors |= 2;

	debug("Ending write leveling calibration. Error mask: 0x%x\n", errors);

	/*
	 * Check to see if cal failed: an all-0x001F001F result on every
	 * active channel is treated as a soft failure (memory does not
	 * support write leveling), and the stashed values are restored.
	 */
	if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) &&
	    (readl(&mmdc0->mpwldectrl1) == 0x001F001F) &&
	    ((sysinfo->dsize < 2) ||
	     ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) &&
	      (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) {
		debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n");
		writel(ldectrl[0], &mmdc0->mpwldectrl0);
		writel(ldectrl[1], &mmdc0->mpwldectrl1);
		if (sysinfo->dsize == 2) {
			writel(ldectrl[2], &mmdc1->mpwldectrl0);
			writel(ldectrl[3], &mmdc1->mpwldectrl1);
		}
		errors |= 4;
	}

	/* Clamp any per-half-word result that exceeds the HW limit. */
	correct_mpwldectr_result(&mmdc0->mpwldectrl0);
	correct_mpwldectr_result(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		correct_mpwldectr_result(&mmdc1->mpwldectrl0);
		correct_mpwldectr_result(&mmdc1->mpwldectrl1);
	}

	/*
	 * User should issue MRS command to exit write leveling mode
	 * through Load Mode Register command
	 * Register setting:
	 * Bits[31:16] MR1 value "ddr_mr1" value from initialization
	 * Bit[9] clear WL_EN to disable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
	writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr);

	/* re-enable auto refresh and zq cal */
	writel(esdmisc_val, &mmdc0->mdref);
	writel(zq_val, &mmdc0->mpzqhwctrl);

	debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl0));
	debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl0));
		debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl1));
	}

	/* We must force a readback of these values, to get them to stick */
	readl(&mmdc0->mpwldectrl0);
	readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		readl(&mmdc1->mpwldectrl0);
		readl(&mmdc1->mpwldectrl1);
	}

	/* enable DDR logic power down timer: */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer: */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Clear CON_REQ */
	writel(0, &mmdc0->mdscr);

	return errors;
}
/*
 * Run the MMDC read-DQS-gating, read-delay and write-delay automatic
 * calibration sequences, in that order.
 *
 * @sysinfo: board DDR description; only ->dsize is consulted here
 *           (dsize == 0x2 means a 64-bit bus, so PHY1 is included).
 *
 * Returns an error bitmask (0 on success):
 *   bits 0/1  - DQS gating errors on PHY0/PHY1
 *   bits 2/3  - read delay calibration errors on PHY0/PHY1
 *   bits 4/5  - write delay calibration errors on PHY0/PHY1
 *
 * NOTE(review): the DQS pull-up manipulation uses the MX6DQ iomux
 * layout (MX6DQ_IOM_DDR_BASE), so this routine appears to assume an
 * MX6D/Q-compatible pad map — confirm before reusing on other parts.
 */
int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux =
		(struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	bool cs0_enable;
	bool cs1_enable;
	bool cs0_enable_initial;
	bool cs1_enable_initial;
	u32 esdmisc_val;
	u32 temp_ref;
	u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */
	u32 errors = 0;
	u32 initdelay = 0x40404040;

	/* check to see which chip selects are enabled */
	cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000;

	/* disable DDR logic power down timer: */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer: */
	setbits_le32(&mmdc0->mapsr, 0x1);

	/* set DQS pull ups */
	setbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Save old RALAT and WALAT values, then raise both to maximum */
	esdmisc_val = readl(&mmdc0->mdmisc);
	setbits_le32(&mmdc0->mdmisc,
		     (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17));

	/* Disable auto refresh before proceeding with calibration */
	temp_ref = readl(&mmdc0->mdref);
	writel(0x0000c000, &mmdc0->mdref);

	/*
	 * Per the ref manual, issue one refresh cycle MDSCR[CMD]= 0x2,
	 * this also sets the CON_REQ bit.
	 */
	if (cs0_enable_initial)
		writel(0x00008020, &mmdc0->mdscr);
	if (cs1_enable_initial)
		writel(0x00008028, &mmdc0->mdscr);

	/* poll to make sure the con_ack bit was asserted */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);

	/*
	 * Check MDMISC register CALIB_PER_CS to see which CS calibration
	 * is targeted to (under normal cases, it should be cleared
	 * as this is the default value, indicating calibration is directed
	 * to CS0).
	 * Disable the other chip select not being target for calibration
	 * to avoid any potential issues. This will get re-enabled at end
	 * of calibration.
	 */
	if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0)
		clrbits_le32(&mmdc0->mdctl, 1 << 30);	/* clear SDE_1 */
	else
		clrbits_le32(&mmdc0->mdctl, 1 << 31);	/* clear SDE_0 */

	/*
	 * Check to see which chip selects are now enabled for
	 * the remainder of the calibration.
	 */
	cs0_enable = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable = readl(&mmdc0->mdctl) & 0x40000000;

	precharge_all(cs0_enable, cs1_enable);

	/* Write the pre-defined value into MPPDCMPR1 */
	writel(pddword, &mmdc0->mppdcmpr1);

	/*
	 * Issue a write access to the external DDR device by setting
	 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll
	 * this bit until it clears to indicate completion of the write access.
	 */
	setbits_le32(&mmdc0->mpswdar0, 1);
	wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0);

	/*
	 * Set the RD_DL_ABS# bits to their default values
	 * (will be calibrated later in the read delay-line calibration).
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mprddlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mprddlctl);

	/* Force a measurement, for previous delay setup to take effect. */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * ***************************
	 * Read DQS Gating calibration
	 * ***************************
	 */
	debug("Starting Read DQS Gating calibration.\n");

	/*
	 * Reset the read data FIFOs (two resets); only need to issue reset
	 * to PHY0 since in x64 mode, the reset will also go to PHY1.
	 */
	reset_read_data_fifos();

	/*
	 * Start the automatic read DQS gating calibration process by
	 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC]
	 * and then poll MPDGCTRL0[HW_DG_EN]] until this bit clears
	 * to indicate completion.
	 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate
	 * no errors were seen during calibration.
	 */

	/*
	 * Set bit 30: chooses option to wait 32 cycles instead of
	 * 16 before comparing read data.
	 */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/* Set bit 28 to start automatic read DQS gating calibration */
	setbits_le32(&mmdc0->mpdgctrl0, 5 << 28);

	/* Poll for completion. MPDGCTRL0[HW_DG_EN] should be 0 */
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0);

	/*
	 * Check to see if any errors were encountered during calibration
	 * (check MPDGCTRL0[HW_DG_ERR]).
	 * Check both PHYs for x64 configuration, if x32, check only PHY0.
	 */
	if (readl(&mmdc0->mpdgctrl0) & 0x00001000)
		errors |= 1;
	if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000))
		errors |= 2;

	/* now disable mpdgctrl0[DG_CMP_CYC] */
	clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/*
	 * DQS gating absolute offset should be modified from
	 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to
	 * reflecting (HW_DG_UPx - 0x80)
	 */
	modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1,
			 &mmdc0->mpdgctrl0);
	modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3,
			 &mmdc0->mpdgctrl1);
	if (sysinfo->dsize == 0x2) {
		modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1,
				 &mmdc1->mpdgctrl0);
		modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3,
				 &mmdc1->mpdgctrl1);
	}
	debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors);

	/*
	 * **********************
	 * Read Delay calibration
	 * **********************
	 */
	debug("Starting Read Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 9. Read delay-line calibration
	 * Start the automatic read calibration process by asserting
	 * MPRDDLHWCTL[HW_RD_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mprddlhwctl);

	/*
	 * 10. poll for completion
	 * MMDC indicates that the write data calibration had finished by
	 * setting MPRDDLHWCTL[HW_RD_DL_EN] = 0. Also, ensure that
	 * no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0);

	/* check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mprddlhwctl) & 0x0000000f)
		errors |= 4;
	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mprddlhwctl) & 0x0000000f))
		errors |= 8;

	debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors);

	/*
	 * ***********************
	 * Write Delay Calibration
	 * ***********************
	 */
	debug("Starting Write Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 8. Set the WR_DL_ABS# bits to their default values.
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mpwrdlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mpwrdlctl);

	/*
	 * XXX This isn't in the manual. Force a measurement,
	 * for previous delay setup to effect.
	 */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * 9. 10. Start the automatic write calibration process
	 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mpwrdlhwctl);

	/*
	 * Poll for completion.
	 * MMDC indicates that the write data calibration had finished
	 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0.
	 * Also, ensure that no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0);

	/* Check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f)
		errors |= 16;
	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f))
		errors |= 32;

	debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors);

	reset_read_data_fifos();

	/* Enable DDR logic power down timer */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Restore MDMISC value (RALAT, WALAT) to MMDCP1 */
	writel(esdmisc_val, &mmdc0->mdmisc);

	/* Clear DQS pull ups */
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Re-enable SDE (chip selects) if they were set initially */
	if (cs1_enable_initial)
		/* Set SDE_1 */
		setbits_le32(&mmdc0->mdctl, 1 << 30);
	if (cs0_enable_initial)
		/* Set SDE_0 */
		setbits_le32(&mmdc0->mdctl, 1 << 31);

	/* Re-enable to auto refresh */
	writel(temp_ref, &mmdc0->mdref);

	/* Clear the MDSCR (including the con_req bit) */
	writel(0x0, &mmdc0->mdscr);	/* CS0 */

	/* Poll to make sure the con_ack bit is clear */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0);

	/*
	 * Print out the registers that were updated as a result
	 * of the calibration process.
	 */
	debug("MMDC registers updated from calibration\n");
	debug("Read DQS gating calibration:\n");
	debug("\tMPDGCTRL0 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl0));
	debug("\tMPDGCTRL1 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGCTRL0 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl0));
		debug("\tMPDGCTRL1 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl1));
	}
	debug("Read calibration:\n");
	debug("\tMPRDDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mprddlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPRDDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mprddlctl));
	debug("Write calibration:\n");
	debug("\tMPWRDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mpwrdlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPWRDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mpwrdlctl));

	/*
	 * Registers below are for debugging purposes. These print out
	 * the upper and lower boundaries captured during
	 * read DQS gating calibration.
	 */
	debug("Status registers bounds for read DQS gating:\n");
	debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0));
	debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1));
	debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2));
	debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0));
		debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1));
		debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2));
		debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3));
	}

	debug("Final do_dqs_calibration error mask: 0x%x\n", errors);

	return errors;
}
  496. #endif
  497. #if defined(CONFIG_MX6SX)
  498. /* Configure MX6SX mmdc iomux */
  499. void mx6sx_dram_iocfg(unsigned width,
  500. const struct mx6sx_iomux_ddr_regs *ddr,
  501. const struct mx6sx_iomux_grp_regs *grp)
  502. {
  503. struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux;
  504. struct mx6sx_iomux_grp_regs *mx6_grp_iomux;
  505. mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE;
  506. mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE;
  507. /* DDR IO TYPE */
  508. writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
  509. writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);
  510. /* CLOCK */
  511. writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);
  512. /* ADDRESS */
  513. writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
  514. writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
  515. writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);
  516. /* Control */
  517. writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
  518. writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
  519. writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0);
  520. writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1);
  521. writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
  522. writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
  523. writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);
  524. /* Data Strobes */
  525. writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
  526. writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
  527. writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
  528. if (width >= 32) {
  529. writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2);
  530. writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3);
  531. }
  532. /* Data */
  533. writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
  534. writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
  535. writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
  536. if (width >= 32) {
  537. writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds);
  538. writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds);
  539. }
  540. writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
  541. writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
  542. if (width >= 32) {
  543. writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2);
  544. writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3);
  545. }
  546. }
  547. #endif
  548. #if defined(CONFIG_MX6UL) || defined(CONFIG_MX6ULL)
  549. void mx6ul_dram_iocfg(unsigned width,
  550. const struct mx6ul_iomux_ddr_regs *ddr,
  551. const struct mx6ul_iomux_grp_regs *grp)
  552. {
  553. struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux;
  554. struct mx6ul_iomux_grp_regs *mx6_grp_iomux;
  555. mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE;
  556. mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE;
  557. /* DDR IO TYPE */
  558. writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
  559. writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);
  560. /* CLOCK */
  561. writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);
  562. /* ADDRESS */
  563. writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
  564. writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
  565. writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);
  566. /* Control */
  567. writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
  568. writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
  569. writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
  570. writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
  571. writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);
  572. /* Data Strobes */
  573. writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
  574. writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
  575. writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
  576. /* Data */
  577. writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
  578. writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
  579. writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
  580. writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
  581. writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
  582. }
  583. #endif
  584. #if defined(CONFIG_MX6SL)
  585. void mx6sl_dram_iocfg(unsigned width,
  586. const struct mx6sl_iomux_ddr_regs *ddr,
  587. const struct mx6sl_iomux_grp_regs *grp)
  588. {
  589. struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux;
  590. struct mx6sl_iomux_grp_regs *mx6_grp_iomux;
  591. mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE;
  592. mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE;
  593. /* DDR IO TYPE */
  594. mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
  595. mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
  596. /* CLOCK */
  597. mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
  598. /* ADDRESS */
  599. mx6_ddr_iomux->dram_cas = ddr->dram_cas;
  600. mx6_ddr_iomux->dram_ras = ddr->dram_ras;
  601. mx6_grp_iomux->grp_addds = grp->grp_addds;
  602. /* Control */
  603. mx6_ddr_iomux->dram_reset = ddr->dram_reset;
  604. mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
  605. mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
  606. /* Data Strobes */
  607. mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
  608. mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
  609. mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
  610. if (width >= 32) {
  611. mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
  612. mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
  613. }
  614. /* Data */
  615. mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
  616. mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
  617. mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
  618. if (width >= 32) {
  619. mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
  620. mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
  621. }
  622. mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
  623. mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
  624. if (width >= 32) {
  625. mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
  626. mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
  627. }
  628. }
  629. #endif
  630. #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D)
  631. /* Configure MX6DQ mmdc iomux */
  632. void mx6dq_dram_iocfg(unsigned width,
  633. const struct mx6dq_iomux_ddr_regs *ddr,
  634. const struct mx6dq_iomux_grp_regs *grp)
  635. {
  636. volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux;
  637. volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux;
  638. mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
  639. mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE;
  640. /* DDR IO Type */
  641. mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
  642. mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
  643. /* Clock */
  644. mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
  645. mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;
  646. /* Address */
  647. mx6_ddr_iomux->dram_cas = ddr->dram_cas;
  648. mx6_ddr_iomux->dram_ras = ddr->dram_ras;
  649. mx6_grp_iomux->grp_addds = grp->grp_addds;
  650. /* Control */
  651. mx6_ddr_iomux->dram_reset = ddr->dram_reset;
  652. mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
  653. mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
  654. mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
  655. mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
  656. mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
  657. mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
  658. /* Data Strobes */
  659. mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
  660. mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
  661. mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
  662. if (width >= 32) {
  663. mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
  664. mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
  665. }
  666. if (width >= 64) {
  667. mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
  668. mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
  669. mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
  670. mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
  671. }
  672. /* Data */
  673. mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
  674. mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
  675. mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
  676. if (width >= 32) {
  677. mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
  678. mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
  679. }
  680. if (width >= 64) {
  681. mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
  682. mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
  683. mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
  684. mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
  685. }
  686. mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
  687. mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
  688. if (width >= 32) {
  689. mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
  690. mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
  691. }
  692. if (width >= 64) {
  693. mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
  694. mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
  695. mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
  696. mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
  697. }
  698. }
  699. #endif
  700. #if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S)
  701. /* Configure MX6SDL mmdc iomux */
  702. void mx6sdl_dram_iocfg(unsigned width,
  703. const struct mx6sdl_iomux_ddr_regs *ddr,
  704. const struct mx6sdl_iomux_grp_regs *grp)
  705. {
  706. volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux;
  707. volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux;
  708. mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE;
  709. mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE;
  710. /* DDR IO Type */
  711. mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
  712. mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
  713. /* Clock */
  714. mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
  715. mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;
  716. /* Address */
  717. mx6_ddr_iomux->dram_cas = ddr->dram_cas;
  718. mx6_ddr_iomux->dram_ras = ddr->dram_ras;
  719. mx6_grp_iomux->grp_addds = grp->grp_addds;
  720. /* Control */
  721. mx6_ddr_iomux->dram_reset = ddr->dram_reset;
  722. mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
  723. mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
  724. mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
  725. mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
  726. mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
  727. mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
  728. /* Data Strobes */
  729. mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
  730. mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
  731. mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
  732. if (width >= 32) {
  733. mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
  734. mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
  735. }
  736. if (width >= 64) {
  737. mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
  738. mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
  739. mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
  740. mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
  741. }
  742. /* Data */
  743. mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
  744. mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
  745. mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
  746. if (width >= 32) {
  747. mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
  748. mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
  749. }
  750. if (width >= 64) {
  751. mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
  752. mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
  753. mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
  754. mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
  755. }
  756. mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
  757. mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
  758. if (width >= 32) {
  759. mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
  760. mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
  761. }
  762. if (width >= 64) {
  763. mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
  764. mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
  765. mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
  766. mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
  767. }
  768. }
  769. #endif
/*
 * Configure mx6 mmdc registers based on:
 * - board-specific memory configuration
 * - board-specific calibration data
 * - ddr3/lpddr2 chip details
 *
 * The various calculations here are derived from the Freescale
 * 1. i.MX6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate
 *    MMDC configuration registers based on memory system and memory chip
 *    parameters.
 *
 * 2. i.MX6SL LPDDR2 Script Aid spreadsheet V0.04 designed to generate MMDC
 *    configuration registers based on memory system and memory chip
 *    parameters.
 *
 * The defaults here are those which were specified in the spreadsheets.
 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM
 * and/or IMX6SLRM section titled "MMDC initialization".
 */
  789. #define MR(val, ba, cmd, cs1) \
  790. ((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)
  791. #define MMDC1(entry, value) do { \
  792. if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl()) \
  793. mmdc1->entry = value; \
  794. } while (0)
  795. /*
  796. * According JESD209-2B-LPDDR2: Table 103
  797. * WL: write latency
  798. */
  799. static int lpddr2_wl(uint32_t mem_speed)
  800. {
  801. switch (mem_speed) {
  802. case 1066:
  803. case 933:
  804. return 4;
  805. case 800:
  806. return 3;
  807. case 677:
  808. case 533:
  809. return 2;
  810. case 400:
  811. case 333:
  812. return 1;
  813. default:
  814. puts("invalid memory speed\n");
  815. hang();
  816. }
  817. return 0;
  818. }
  819. /*
  820. * According JESD209-2B-LPDDR2: Table 103
  821. * RL: read latency
  822. */
  823. static int lpddr2_rl(uint32_t mem_speed)
  824. {
  825. switch (mem_speed) {
  826. case 1066:
  827. return 8;
  828. case 933:
  829. return 7;
  830. case 800:
  831. return 6;
  832. case 677:
  833. return 5;
  834. case 533:
  835. return 4;
  836. case 400:
  837. case 333:
  838. return 3;
  839. default:
  840. puts("invalid memory speed\n");
  841. hang();
  842. }
  843. return 0;
  844. }
/*
 * Configure the MMDC controller for an LPDDR2 memory system.
 *
 * sysinfo:    board-level system description (bus size, #CS, latencies, ...)
 * calib:      board-specific calibration values (write leveling, DQS gating,
 *             read/write delay lines, LP2 ZQ control)
 * lpddr2_cfg: LPDDR2 chip parameters (speed grade, density, geometry,
 *             tRAS/tRCD/tRP minimums in 0.1ns units)
 *
 * The "Step N" comments follow the MMDC initialization sequence described
 * in the i.MX6 reference manuals. Only 16/32-bit buses are supported
 * (dsize <= 1); there is no second MMDC port in LPDDR2 mode here.
 */
void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		    const struct mx6_mmdc_calibration *calib,
		    const struct mx6_lpddr2_cfg *lpddr2_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	u32 val;
	u8 tcke, tcksrx, tcksre, trrd;
	u8 twl, txp, tfaw, tcl;
	u16 tras, twr, tmrd, trtp, twtr, trfc, txsr;
	u16 trcd_lp, trppb_lp, trpab_lp, trc_lp;
	u16 cs0_end;
	u8 coladdr;
	int clkper; /* clock period in picoseconds */
	int clock;  /* clock freq in MHz */
	int cs;

	/* only support 16/32 bits */
	if (sysinfo->dsize > 1)
		hang();

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U;
	clkper = (1000 * 1000) / clock; /* pico seconds */

	/* MMDC field stores WL - 1 */
	twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1;

	/* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */
	switch (lpddr2_cfg->density) {
	case 1:
	case 2:
	case 4:
		trfc = DIV_ROUND_UP(130000, clkper) - 1;
		txsr = DIV_ROUND_UP(140000, clkper) - 1;
		break;
	case 8:
		trfc = DIV_ROUND_UP(210000, clkper) - 1;
		txsr = DIV_ROUND_UP(220000, clkper) - 1;
		break;
	default:
		/*
		 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently.
		 */
		hang();
		break;
	}

	/*
	 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode,
	 * set them to 0. */
	txp = DIV_ROUND_UP(7500, clkper) - 1;
	tcke = 3;
	if (lpddr2_cfg->mem_speed == 333)
		tfaw = DIV_ROUND_UP(60000, clkper) - 1;
	else
		tfaw = DIV_ROUND_UP(50000, clkper) - 1;
	trrd = DIV_ROUND_UP(10000, clkper) - 1;

	/* tckesr for LPDDR2 */
	tcksre = DIV_ROUND_UP(15000, clkper);
	tcksrx = tcksre;
	twr = DIV_ROUND_UP(15000, clkper) - 1;
	/*
	 * tMRR: 2, tMRW: 5
	 * tMRD should be set to max(tMRR, tMRW)
	 */
	tmrd = 5;
	/* chip timings are given in 0.1ns units, hence clkper / 10 */
	tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1;
	/* LPDDR2 mode uses the tRCD_LP field in MDCFG3. */
	trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1;
	trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp,
			      clkper / 10) - 1;
	trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1;
	trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1;
	/* To LPDDR2, CL in MDCFG0 refers to RL */
	tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3;
	twtr = DIV_ROUND_UP(7500, clkper) - 1;
	trtp = DIV_ROUND_UP(7500, clkper) - 1;

	/* CS0 end address in 256MB units minus 1 (MDASP encoding) */
	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, lpddr2_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", lpddr2_cfg->mem_speed);
	debug("trcd_lp=%d\n", trcd_lp);
	debug("trppb_lp=%d\n", trppb_lp);
	debug("trpab_lp=%d\n", trpab_lp);
	debug("trc_lp=%d\n", trc_lp);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("trfc=%d\n", trfc);
	debug("txsr=%d\n", txsr);
	debug("txp=%d\n", txp);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("twl=%d\n", twl);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);

	/*
	 * board-specific configuration:
	 * These values are determined empirically and vary per board layout
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl;

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}
	/* Write data DQ Byte0-3 delay */
	mmdc0->mpwrdqby0dl = 0xf3333333;
	mmdc0->mpwrdqby1dl = 0xf3333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mpwrdqby2dl = 0xf3333333;
		mmdc0->mpwrdqby3dl = 0xf3333333;
	}

	/*
	 * In LPDDR2 mode this register should be cleared,
	 * so no termination will be activated.
	 */
	mmdc0->mpodtctrl = 0;

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) |
			(tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl;
	mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) |
			  (trppb_lp << 4) | trpab_lp;
	/* on-die termination timers not used in LPDDR2 mode */
	mmdc0->mdotc = 0;

	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6) | (1 << 3);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = lpddr2_cfg->coladdr;
	if (lpddr2_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (lpddr2_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl = (lpddr2_cfg->rowaddr - 11) << 24 |	/* ROW */
		       (coladdr - 9) << 20 |			/* COL */
		       (0 << 19) |	/* Burst Length = 4 for LPDDR2 */
		       (sysinfo->dsize << 16);	/* DDR data bus size */

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390003; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init LPDDR2 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR63: reset */
		mmdc0->mdscr = MR(63, 0, 3, cs);
		/*
		 * MR10: calibration,
		 * 0xff is the calibration command after initialization.
		 */
		val = 0xA | (0xff << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR1 */
		val = 0x1 | (0x82 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR2 */
		val = 0x2 | (0x04 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* MR3 */
		val = 0x3 | (0x02 << 8);
		mmdc0->mdscr = MR(val, 0, 3, cs);
	}

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5 << 12 |	/* PWDT_1: 256 cycles */
			5 << 8 |	/* PWDT_0: 256 cycles */
			1 << 6 |	/* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1310003;
	mmdc0->mpzqhwctrl = val;

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}
/*
 * Configure the MMDC controller for a DDR3 memory system.
 *
 * sysinfo:  board-level system description (bus size dsize, #CS, Rtt
 *           settings, latencies, refresh configuration, ...)
 * calib:    board-specific calibration values (write leveling, DQS gating,
 *           read/write delay lines) for both MMDC ports
 * ddr3_cfg: DDR3 chip parameters (speed grade, density, geometry, and
 *           tRCD/tRP-equivalent/tRC/tRAS minimums in 0.1ns units)
 *
 * The "Step N" comments follow the MMDC initialization sequence from the
 * i.MX6 reference manuals; the timing formulas mirror the Freescale DDR3
 * Script Aid spreadsheet. Writes to the second port (mmdc1) go through the
 * MMDC1() macro, which is a no-op on single-port parts.
 */
void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		  const struct mx6_mmdc_calibration *calib,
		  const struct mx6_ddr3_cfg *ddr3_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	volatile struct mmdc_p_regs *mmdc1;
	u32 val;
	u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd;
	u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl;
	u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */
	u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr;
	u16 cs0_end;
	u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */
	u8 coladdr;
	int clkper; /* clock period in picoseconds */
	int clock; /* clock freq in MHz */
	int cs;
	u16 mem_speed = ddr3_cfg->mem_speed;

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	/* mmdc1 is only initialized on parts that have a second MMDC port;
	 * MMDC1() checks the same condition before dereferencing it. */
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6ull() && !is_mx6sl())
		mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	/* Limit mem_speed for MX6D/MX6Q */
	if (is_mx6dq() || is_mx6dqp()) {
		if (mem_speed > 1066)
			mem_speed = 1066; /* 1066 MT/s */
		tcwl = 4;
	}
	/* Limit mem_speed for MX6S/MX6DL */
	else {
		if (mem_speed > 800)
			mem_speed = 800;  /* 800 MT/s */
		tcwl = 3;
	}
	clock = mem_speed / 2;
	/*
	 * Data rate of 1066 MT/s requires 533 MHz DDR3 clock, but MX6D/Q
	 * supports up to 528 MHz, so reduce the clock to fit chip specs
	 */
	if (is_mx6dq() || is_mx6dqp()) {
		if (clock > 528)
			clock = 528; /* 528 MHz */
	}
	clkper = (1000 * 1000) / clock; /* pico seconds */
	todtlon = tcwl;
	taxpd = tcwl;
	tanpd = tcwl;

	/* tRFC/tXS scale with device density (JEDEC DDR3 refresh timings) */
	switch (ddr3_cfg->density) {
	case 1: /* 1Gb per chip */
		trfc = DIV_ROUND_UP(110000, clkper) - 1;
		txs = DIV_ROUND_UP(120000, clkper) - 1;
		break;
	case 2: /* 2Gb per chip */
		trfc = DIV_ROUND_UP(160000, clkper) - 1;
		txs = DIV_ROUND_UP(170000, clkper) - 1;
		break;
	case 4: /* 4Gb per chip */
		trfc = DIV_ROUND_UP(260000, clkper) - 1;
		txs = DIV_ROUND_UP(270000, clkper) - 1;
		break;
	case 8: /* 8Gb per chip */
		trfc = DIV_ROUND_UP(350000, clkper) - 1;
		txs = DIV_ROUND_UP(360000, clkper) - 1;
		break;
	default:
		/* invalid density */
		puts("invalid chip density\n");
		hang();
		break;
	}
	txpr = txs;

	switch (mem_speed) {
	case 800:
		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		if (ddr3_cfg->pagesz == 1) {
			tfaw = DIV_ROUND_UP(40000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		} else {
			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		}
		break;
	case 1066:
		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
		tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1;
		if (ddr3_cfg->pagesz == 1) {
			tfaw = DIV_ROUND_UP(37500, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1;
		} else {
			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
		}
		break;
	default:
		puts("invalid memory speed\n");
		hang();
		break;
	}
	txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1;
	tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper);
	taonpd = DIV_ROUND_UP(2000, clkper) - 1;
	tcksrx = tcksre;
	taofpd = taonpd;
	twr = DIV_ROUND_UP(15000, clkper) - 1;
	tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1;
	/* chip timings are given in 0.1ns units, hence clkper / 10 */
	trc = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1;
	tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1;
	/* CL and tRP are both derived from the tRCD input
	 * (spreadsheet convention: tRCD = tRP = CL time) */
	tcl = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3;
	trp = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1;
	twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1;
	trcd = trp;
	trtp = twtr;
	/* CS0 end address in 256MB units minus 1 (MDASP encoding) */
	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, ddr3_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", mem_speed);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("taofpd=%d\n", taofpd);
	debug("taonpd=%d\n", taonpd);
	debug("todtlon=%d\n", todtlon);
	debug("tanpd=%d\n", tanpd);
	debug("taxpd=%d\n", taxpd);
	debug("trfc=%d\n", trfc);
	debug("txs=%d\n", txs);
	debug("txp=%d\n", txp);
	debug("txpdll=%d\n", txpdll);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("trcd=%d\n", trcd);
	debug("trp=%d\n", trp);
	debug("trc=%d\n", trc);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("tcwl=%d\n", tcwl);
	debug("tdllk=%d\n", tdllk);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("txpr=%d\n", txpr);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);
	debug("Rtt_wr=%d\n", sysinfo->rtt_wr);
	debug("Rtt_nom=%d\n", sysinfo->rtt_nom);
	debug("SRT=%d\n", ddr3_cfg->SRT);
	debug("twr=%d\n", twr);

	/*
	 * board-specific configuration:
	 * These values are determined empirically and vary per board layout
	 * see:
	 * appnote, ddr3 spreadsheet
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	/* second port calibration only applies to 64-bit (dsize == 2) */
	if (sysinfo->dsize > 1) {
		MMDC1(mpwldectrl0, calib->p1_mpwldectrl0);
		MMDC1(mpwldectrl1, calib->p1_mpwldectrl1);
		MMDC1(mpdgctrl0, calib->p1_mpdgctrl0);
		MMDC1(mpdgctrl1, calib->p1_mpdgctrl1);
		MMDC1(mprddlctl, calib->p1_mprddlctl);
		MMDC1(mpwrdlctl, calib->p1_mpwrdlctl);
	}

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}
	if (sysinfo->dsize > 1) {
		MMDC1(mprddqby0dl, 0x33333333);
		MMDC1(mprddqby1dl, 0x33333333);
		MMDC1(mprddqby2dl, 0x33333333);
		MMDC1(mprddqby3dl, 0x33333333);
	}

	/* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */
	val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227;
	mmdc0->mpodtctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpodtctrl, val);

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpmur0, val);

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) |
			(txpdll << 9) | (tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) |
			(tras << 16) | (1 << 15) /* trpa */ |
			(twr << 9) | (tmrd << 5) | tcwl;
	mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) |
		       (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4);
	mmdc0->mdasp = cs0_end; /* CS addressing */

	/* Step 3: Configure DDR type */
	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
			(sysinfo->ralat << 6);

	/* Step 4: Configure delay while leaving reset */
	mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) |
		      (sysinfo->rst_to_cke << 0);

	/* Step 5: Configure DDR physical parameters (density and burst len) */
	coladdr = ddr3_cfg->coladdr;
	if (ddr3_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
		coladdr += 4;
	else if (ddr3_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
		coladdr += 1;
	mmdc0->mdctl = (ddr3_cfg->rowaddr - 11) << 24 |	/* ROW */
		       (coladdr - 9) << 20 |		/* COL */
		       (1 << 19) |	/* Burst Length = 8 for DDR3 */
		       (sysinfo->dsize << 16);	/* DDR data bus size */

	/* Step 6: Perform ZQ calibration */
	val = 0xa1390001; /* one-time HW ZQ calib */
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 7: Enable MMDC with desired chip select */
	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */

	/* Step 8: Write Mode Registers to Init DDR3 devices */
	for (cs = 0; cs < sysinfo->ncs; cs++) {
		/* MR2 */
		val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 |
		      ((tcwl - 3) & 3) << 3;
		debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs));
		mmdc0->mdscr = MR(val, 2, 3, cs);
		/* MR3 */
		debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs));
		mmdc0->mdscr = MR(0, 3, 3, cs);
		/* MR1 */
		val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 |
		      ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6;
		debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs));
		mmdc0->mdscr = MR(val, 1, 3, cs);
		/* MR0 */
		val = ((tcl - 1) << 4) |	/* CAS */
		      (1 << 8) |		/* DLL Reset */
		      ((twr - 3) << 9) |	/* Write Recovery */
		      (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */
		debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs));
		mmdc0->mdscr = MR(val, 0, 3, cs);
		/* ZQ calibration */
		val = (1 << 10);
		mmdc0->mdscr = MR(val, 0, 4, cs);
	}

	/* Step 10: Power down control and self-refresh */
	mmdc0->mdpdc = (tcke & 0x7) << 16 |
			5 << 12 |	/* PWDT_1: 256 cycles */
			5 << 8 |	/* PWDT_0: 256 cycles */
			1 << 6 |	/* BOTH_CS_PD */
			(tcksrx & 0x7) << 3 |
			(tcksre & 0x7);
	if (!sysinfo->pd_fast_exit)
		mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */
	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */

	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
	val = 0xa1390003;
	mmdc0->mpzqhwctrl = val;
	if (sysinfo->dsize > 1)
		MMDC1(mpzqhwctrl, val);

	/* Step 12: Configure and activate periodic refresh */
	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);

	/* Step 13: Deassert config request - init complete */
	mmdc0->mdscr = 0x00000000;

	/* wait for auto-ZQ calibration to complete */
	mdelay(1);
}
  1323. void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo,
  1324. struct mx6_mmdc_calibration *calib)
  1325. {
  1326. struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
  1327. struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
  1328. calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0);
  1329. calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1);
  1330. calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0);
  1331. calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1);
  1332. calib->p0_mprddlctl = readl(&mmdc0->mprddlctl);
  1333. calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl);
  1334. if (sysinfo->dsize == 2) {
  1335. calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0);
  1336. calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1);
  1337. calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0);
  1338. calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1);
  1339. calib->p1_mprddlctl = readl(&mmdc1->mprddlctl);
  1340. calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl);
  1341. }
  1342. }
  1343. void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo,
  1344. const struct mx6_mmdc_calibration *calib,
  1345. const void *ddr_cfg)
  1346. {
  1347. if (sysinfo->ddr_type == DDR_TYPE_DDR3) {
  1348. mx6_ddr3_cfg(sysinfo, calib, ddr_cfg);
  1349. } else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) {
  1350. mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg);
  1351. } else {
  1352. puts("Unsupported ddr type\n");
  1353. hang();
  1354. }
  1355. }