  1. /*
  2. * DDR3 mem setup file for board based on EXYNOS5
  3. *
  4. * Copyright (C) 2012 Samsung Electronics
  5. *
  6. * SPDX-License-Identifier: GPL-2.0+
  7. */
  8. #include <common.h>
  9. #include <config.h>
  10. #include <asm/io.h>
  11. #include <asm/arch/clock.h>
  12. #include <asm/arch/cpu.h>
  13. #include <asm/arch/dmc.h>
  14. #include <asm/arch/power.h>
  15. #include "common_setup.h"
  16. #include "exynos5_setup.h"
  17. #include "clock_init.h"
  18. #define TIMEOUT_US 10000
  19. #define NUM_BYTE_LANES 4
  20. #define DEFAULT_DQS 8
  21. #define DEFAULT_DQS_X4 (DEFAULT_DQS << 24) || (DEFAULT_DQS << 16) \
  22. || (DEFAULT_DQS << 8) || (DEFAULT_DQS << 0)
  23. #ifdef CONFIG_EXYNOS5250
/*
 * Toggle the LPDDR3 PHY reset control in the clock controller so the
 * DDR PHY starts from a known state before it is reprogrammed.
 *
 * The two-write sequence pulses the reset line; the write order is
 * hardware-significant and must not be changed.
 */
static void reset_phy_ctrl(void)
{
	struct exynos5_clock *clk =
		(struct exynos5_clock *)samsung_get_base_clock();

	writel(DDR3PHY_CTRL_PHY_RESET_OFF, &clk->lpddr3phy_ctrl);
	writel(DDR3PHY_CTRL_PHY_RESET, &clk->lpddr3phy_ctrl);
}
/**
 * ddr3_mem_ctrl_init() - Initialise the DDR3 memory controller (Exynos5250).
 *
 * Programs both DDR PHYs and the DMC in the strict sequence required by
 * the hardware: drive strength, read latency, ZQ calibration, DLL lock,
 * address map and AC timings, then the DRAM init commands and (optionally)
 * hardware read/gate leveling.
 *
 * @mem:	memory timings and configuration values to program
 * @reset:	non-zero to pulse the PHY reset before configuration
 * Return: 0 on success, SETUP_ERR_* error code on failure.
 */
int ddr3_mem_ctrl_init(struct mem_timings *mem, int reset)
{
	unsigned int val;
	struct exynos5_phy_control *phy0_ctrl, *phy1_ctrl;
	struct exynos5_dmc *dmc;
	int i;

	phy0_ctrl = (struct exynos5_phy_control *)samsung_get_base_dmc_phy();
	phy1_ctrl = (struct exynos5_phy_control *)(samsung_get_base_dmc_phy()
							+ DMC_OFFSET);
	dmc = (struct exynos5_dmc *)samsung_get_base_dmc_ctrl();

	if (reset)
		reset_phy_ctrl();

	/* Set Impedance Output Driver */
	val = (mem->impedance << CA_CK_DRVR_DS_OFFSET) |
		(mem->impedance << CA_CKE_DRVR_DS_OFFSET) |
		(mem->impedance << CA_CS_DRVR_DS_OFFSET) |
		(mem->impedance << CA_ADR_DRVR_DS_OFFSET);
	writel(val, &phy0_ctrl->phy_con39);
	writel(val, &phy1_ctrl->phy_con39);

	/* Set Read Latency and Burst Length for PHY0 and PHY1 */
	val = (mem->ctrl_bstlen << PHY_CON42_CTRL_BSTLEN_SHIFT) |
		(mem->ctrl_rdlat << PHY_CON42_CTRL_RDLAT_SHIFT);
	writel(val, &phy0_ctrl->phy_con42);
	writel(val, &phy1_ctrl->phy_con42);

	/* ZQ Calibration */
	if (dmc_config_zq(mem, &phy0_ctrl->phy_con16, &phy1_ctrl->phy_con16,
			  &phy0_ctrl->phy_con17, &phy1_ctrl->phy_con17))
		return SETUP_ERR_ZQ_CALIBRATION_FAILURE;

	/* DQ Signal */
	writel(mem->phy0_pulld_dqs, &phy0_ctrl->phy_con14);
	writel(mem->phy1_pulld_dqs, &phy1_ctrl->phy_con14);

	/* Kick off DFI initialisation along with the fetch configuration */
	writel(mem->concontrol | (mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT)
		| (mem->dfi_init_start << CONCONTROL_DFI_INIT_START_SHIFT),
		&dmc->concontrol);

	update_reset_dll(&dmc->phycontrol0, DDR_MODE_DDR3);

	/* DQS Signal */
	writel(mem->phy0_dqs, &phy0_ctrl->phy_con4);
	writel(mem->phy1_dqs, &phy1_ctrl->phy_con4);

	writel(mem->phy0_dq, &phy0_ctrl->phy_con6);
	writel(mem->phy1_dq, &phy1_ctrl->phy_con6);

	writel(mem->phy0_tFS, &phy0_ctrl->phy_con10);
	writel(mem->phy1_tFS, &phy1_ctrl->phy_con10);

	val = (mem->ctrl_start_point << PHY_CON12_CTRL_START_POINT_SHIFT) |
		(mem->ctrl_inc << PHY_CON12_CTRL_INC_SHIFT) |
		(mem->ctrl_dll_on << PHY_CON12_CTRL_DLL_ON_SHIFT) |
		(mem->ctrl_ref << PHY_CON12_CTRL_REF_SHIFT);
	writel(val, &phy0_ctrl->phy_con12);
	writel(val, &phy1_ctrl->phy_con12);

	/* Start DLL locking */
	writel(val | (mem->ctrl_start << PHY_CON12_CTRL_START_SHIFT),
	       &phy0_ctrl->phy_con12);
	writel(val | (mem->ctrl_start << PHY_CON12_CTRL_START_SHIFT),
	       &phy1_ctrl->phy_con12);

	update_reset_dll(&dmc->phycontrol0, DDR_MODE_DDR3);

	/* DFI init done: re-write concontrol without the init-start bit */
	writel(mem->concontrol | (mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT),
	       &dmc->concontrol);

	/* Memory Channel Interleaving Size */
	writel(mem->iv_size, &dmc->ivcontrol);

	writel(mem->memconfig, &dmc->memconfig0);
	writel(mem->memconfig, &dmc->memconfig1);
	writel(mem->membaseconfig0, &dmc->membaseconfig0);
	writel(mem->membaseconfig1, &dmc->membaseconfig1);

	/* Precharge Configuration */
	writel(mem->prechconfig_tp_cnt << PRECHCONFIG_TP_CNT_SHIFT,
	       &dmc->prechconfig);

	/* Power Down mode Configuration */
	writel(mem->dpwrdn_cyc << PWRDNCONFIG_DPWRDN_CYC_SHIFT |
	       mem->dsref_cyc << PWRDNCONFIG_DSREF_CYC_SHIFT,
	       &dmc->pwrdnconfig);

	/* TimingRow, TimingData, TimingPower and Timingaref
	 * values as per Memory AC parameters
	 */
	writel(mem->timing_ref, &dmc->timingref);
	writel(mem->timing_row, &dmc->timingrow);
	writel(mem->timing_data, &dmc->timingdata);
	writel(mem->timing_power, &dmc->timingpower);

	/* Send PALL command */
	dmc_config_prech(mem, &dmc->directcmd);

	/* Send NOP, MRS and ZQINIT commands */
	dmc_config_mrs(mem, &dmc->directcmd);

	if (mem->gate_leveling_enable) {
		/*
		 * Hardware read/gate leveling sequence. Each PHY_CON0/2/12
		 * write below enables one more leveling stage; the exact
		 * order of these register writes is mandated by the PHY.
		 */
		val = PHY_CON0_RESET_VAL;
		val |= P0_CMD_EN;
		writel(val, &phy0_ctrl->phy_con0);
		writel(val, &phy1_ctrl->phy_con0);

		val = PHY_CON2_RESET_VAL;
		val |= INIT_DESKEW_EN;
		writel(val, &phy0_ctrl->phy_con2);
		writel(val, &phy1_ctrl->phy_con2);

		val = PHY_CON0_RESET_VAL;
		val |= P0_CMD_EN;
		val |= BYTE_RDLVL_EN;
		writel(val, &phy0_ctrl->phy_con0);
		writel(val, &phy1_ctrl->phy_con0);

		val = (mem->ctrl_start_point <<
				PHY_CON12_CTRL_START_POINT_SHIFT) |
			(mem->ctrl_inc << PHY_CON12_CTRL_INC_SHIFT) |
			(mem->ctrl_force << PHY_CON12_CTRL_FORCE_SHIFT) |
			(mem->ctrl_start << PHY_CON12_CTRL_START_SHIFT) |
			(mem->ctrl_ref << PHY_CON12_CTRL_REF_SHIFT);
		writel(val, &phy0_ctrl->phy_con12);
		writel(val, &phy1_ctrl->phy_con12);

		val = PHY_CON2_RESET_VAL;
		val |= INIT_DESKEW_EN;
		val |= RDLVL_GATE_EN;
		writel(val, &phy0_ctrl->phy_con2);
		writel(val, &phy1_ctrl->phy_con2);

		val = PHY_CON0_RESET_VAL;
		val |= P0_CMD_EN;
		val |= BYTE_RDLVL_EN;
		val |= CTRL_SHGATE;
		writel(val, &phy0_ctrl->phy_con0);
		writel(val, &phy1_ctrl->phy_con0);

		val = PHY_CON1_RESET_VAL;
		val &= ~(CTRL_GATEDURADJ_MASK);
		writel(val, &phy0_ctrl->phy_con1);
		writel(val, &phy1_ctrl->phy_con1);

		/* Poll both channels for read-leveling completion */
		writel(CTRL_RDLVL_GATE_ENABLE, &dmc->rdlvl_config);
		i = TIMEOUT_US;
		while ((readl(&dmc->phystatus) &
			(RDLVL_COMPLETE_CHO | RDLVL_COMPLETE_CH1)) !=
			(RDLVL_COMPLETE_CHO | RDLVL_COMPLETE_CH1) && i > 0) {
			/*
			 * TODO(waihong): Comment on how long this take to
			 * timeout
			 */
			sdelay(100);
			i--;
		}
		if (!i)
			return SETUP_ERR_RDLV_COMPLETE_TIMEOUT;
		writel(CTRL_RDLVL_GATE_DISABLE, &dmc->rdlvl_config);

		/* Leveling done: drop the DQS pull-downs */
		writel(0, &phy0_ctrl->phy_con14);
		writel(0, &phy1_ctrl->phy_con14);

		val = (mem->ctrl_start_point <<
				PHY_CON12_CTRL_START_POINT_SHIFT) |
			(mem->ctrl_inc << PHY_CON12_CTRL_INC_SHIFT) |
			(mem->ctrl_force << PHY_CON12_CTRL_FORCE_SHIFT) |
			(mem->ctrl_start << PHY_CON12_CTRL_START_SHIFT) |
			(mem->ctrl_dll_on << PHY_CON12_CTRL_DLL_ON_SHIFT) |
			(mem->ctrl_ref << PHY_CON12_CTRL_REF_SHIFT);
		writel(val, &phy0_ctrl->phy_con12);
		writel(val, &phy1_ctrl->phy_con12);

		update_reset_dll(&dmc->phycontrol0, DDR_MODE_DDR3);
	}

	/* Send PALL command */
	dmc_config_prech(mem, &dmc->directcmd);

	writel(mem->memcontrol, &dmc->memcontrol);

	/* Set DMC Concontrol and enable auto-refresh counter */
	writel(mem->concontrol | (mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT)
		| (mem->aref_en << CONCONTROL_AREF_EN_SHIFT), &dmc->concontrol);
	return 0;
}
  184. #endif
  185. #ifdef CONFIG_EXYNOS5420
  186. /**
  187. * RAM address to use in the test.
  188. *
  189. * We'll use 4 words at this address and 4 at this address + 0x80 (Ares
  190. * interleaves channels every 128 bytes). This will allow us to evaluate all of
  191. * the chips in a 1 chip per channel (2GB) system and half the chips in a 2
  192. * chip per channel (4GB) system. We can't test the 2nd chip since we need to
  193. * do tests before the 2nd chip is enabled. Looking at the 2nd chip isn't
  194. * critical because the 1st and 2nd chip have very similar timings (they'd
  195. * better have similar timings, since there's only a single adjustment that is
  196. * shared by both chips).
  197. */
/* Base address of the scratch words used by the read-leveling RAM test */
const unsigned int test_addr = CONFIG_SYS_SDRAM_BASE;

/* Test pattern with which RAM will be tested */
static const unsigned int test_pattern[] = {
	0x5a5a5a5a,
	0xa5a5a5a5,
	0xf0f0f0f0,
	0x0f0f0f0f,
};
  206. /**
  207. * This function is a test vector for sw read leveling,
  208. * it compares the read data with the written data.
  209. *
  210. * @param ch DMC channel number
  211. * @param byte_lane which DQS byte offset,
  212. * possible values are 0,1,2,3
  213. * @return TRUE if memory was good, FALSE if not.
  214. */
  215. static bool dmc_valid_window_test_vector(int ch, int byte_lane)
  216. {
  217. unsigned int read_data;
  218. unsigned int mask;
  219. int i;
  220. mask = 0xFF << (8 * byte_lane);
  221. for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
  222. read_data = readl(test_addr + i * 4 + ch * 0x80);
  223. if ((read_data & mask) != (test_pattern[i] & mask))
  224. return false;
  225. }
  226. return true;
  227. }
/**
 * This function returns current read offset value.
 *
 * Reads PHY_CON4, which holds the packed per-byte-lane read DQS offsets
 * (one byte per lane, as manipulated by test_shifts()).
 *
 * @param phy_ctrl pointer to the current phy controller
 */
static unsigned int dmc_get_read_offset_value(struct exynos5420_phy_control
					      *phy_ctrl)
{
	return readl(&phy_ctrl->phy_con4);
}
/**
 * This function performs resync, so that slave DLL is updated.
 *
 * Pulses the OFFSETR3 bit in PHY_CON10 (set, then clear). The set/clear
 * ordering forms the resync strobe and must not be reordered.
 *
 * @param phy_ctrl pointer to the current phy controller
 */
static void ddr_phy_set_do_resync(struct exynos5420_phy_control *phy_ctrl)
{
	setbits_le32(&phy_ctrl->phy_con10, PHY_CON10_CTRL_OFFSETR3);
	clrbits_le32(&phy_ctrl->phy_con10, PHY_CON10_CTRL_OFFSETR3);
}
/**
 * This function sets read offset value register with 'offset'.
 *
 * After writing PHY_CON4 we also call ddr_phy_set_do_resync() so the
 * slave DLL actually picks up the new offset.
 *
 * @param phy_ctrl pointer to the current phy controller
 * @param offset offset to read DQS (packed, one byte per lane)
 */
static void dmc_set_read_offset_value(struct exynos5420_phy_control *phy_ctrl,
				      unsigned int offset)
{
	writel(offset, &phy_ctrl->phy_con4);
	ddr_phy_set_do_resync(phy_ctrl);
}
  262. /**
  263. * Convert a 2s complement byte to a byte with a sign bit.
  264. *
  265. * NOTE: you shouldn't use normal math on the number returned by this function.
  266. * As an example, -10 = 0xf6. After this function -10 = 0x8a. If you wanted
  267. * to do math and get the average of 10 and -10 (should be 0):
  268. * 0x8a + 0xa = 0x94 (-108)
  269. * 0x94 / 2 = 0xca (-54)
  270. * ...and 0xca = sign bit plus 0x4a, or -74
  271. *
  272. * Also note that you lose the ability to represent -128 since there are two
  273. * representations of 0.
  274. *
  275. * @param b The byte to convert in two's complement.
  276. * @return The 7-bit value + sign bit.
  277. */
  278. unsigned char make_signed_byte(signed char b)
  279. {
  280. if (b < 0)
  281. return 0x80 | -b;
  282. else
  283. return b;
  284. }
  285. /**
  286. * Test various shifts starting at 'start' and going to 'end'.
  287. *
  288. * For each byte lane, we'll walk through shift starting at 'start' and going
  289. * to 'end' (inclusive). When we are finally able to read the test pattern
  290. * we'll store the value in the results array.
  291. *
  292. * @param phy_ctrl pointer to the current phy controller
  293. * @param ch channel number
  294. * @param start the start shift. -127 to 127
  295. * @param end the end shift. -127 to 127
  296. * @param results we'll store results for each byte lane.
  297. */
  298. void test_shifts(struct exynos5420_phy_control *phy_ctrl, int ch,
  299. int start, int end, int results[NUM_BYTE_LANES])
  300. {
  301. int incr = (start < end) ? 1 : -1;
  302. int byte_lane;
  303. for (byte_lane = 0; byte_lane < NUM_BYTE_LANES; byte_lane++) {
  304. int shift;
  305. dmc_set_read_offset_value(phy_ctrl, DEFAULT_DQS_X4);
  306. results[byte_lane] = DEFAULT_DQS;
  307. for (shift = start; shift != (end + incr); shift += incr) {
  308. unsigned int byte_offsetr;
  309. unsigned int offsetr;
  310. byte_offsetr = make_signed_byte(shift);
  311. offsetr = dmc_get_read_offset_value(phy_ctrl);
  312. offsetr &= ~(0xFF << (8 * byte_lane));
  313. offsetr |= (byte_offsetr << (8 * byte_lane));
  314. dmc_set_read_offset_value(phy_ctrl, offsetr);
  315. if (dmc_valid_window_test_vector(ch, byte_lane)) {
  316. results[byte_lane] = shift;
  317. break;
  318. }
  319. }
  320. }
  321. }
  322. /**
  323. * This function performs SW read leveling to compensate DQ-DQS skew at
  324. * receiver it first finds the optimal read offset value on each DQS
  325. * then applies the value to PHY.
  326. *
  327. * Read offset value has its min margin and max margin. If read offset
  328. * value exceeds its min or max margin, read data will have corruption.
  329. * To avoid this we are doing sw read leveling.
  330. *
  331. * SW read leveling is:
  332. * 1> Finding offset value's left_limit and right_limit
  333. * 2> and calculate its center value
  334. * 3> finally programs that center value to PHY
  335. * 4> then PHY gets its optimal offset value.
  336. *
  337. * @param phy_ctrl pointer to the current phy controller
  338. * @param ch channel number
  339. * @param coarse_lock_val The coarse lock value read from PHY_CON13.
  340. * (0 - 0x7f)
  341. */
static void software_find_read_offset(struct exynos5420_phy_control *phy_ctrl,
				      int ch, unsigned int coarse_lock_val)
{
	unsigned int offsetr_cent;
	int byte_lane;
	int left_limit;
	int right_limit;
	int left[NUM_BYTE_LANES];
	int right[NUM_BYTE_LANES];
	int i;

	/* Fill the memory with test patterns */
	for (i = 0; i < ARRAY_SIZE(test_pattern); i++)
		writel(test_pattern[i], test_addr + i * 4 + ch * 0x80);

	/*
	 * Figure out the limits we'll test with; keep -127 < limit < 127.
	 * coarse_lock_val <= 0x7f (see caller), so only the right limit
	 * can exceed the range and needs clamping.
	 */
	left_limit = DEFAULT_DQS - coarse_lock_val;
	right_limit = DEFAULT_DQS + coarse_lock_val;
	if (right_limit > 127)
		right_limit = 127;

	/* Fill in the location where reads were OK from left and right */
	test_shifts(phy_ctrl, ch, left_limit, right_limit, left);
	test_shifts(phy_ctrl, ch, right_limit, left_limit, right);

	/* Make a final value by taking the center between the left and right */
	offsetr_cent = 0;
	for (byte_lane = 0; byte_lane < NUM_BYTE_LANES; byte_lane++) {
		int temp_center;
		unsigned int vmwc;

		/*
		 * left/right hold plain shift values, so ordinary averaging
		 * is valid here; the result is converted to the PHY's
		 * sign-magnitude encoding only afterwards.
		 */
		temp_center = (left[byte_lane] + right[byte_lane]) / 2;
		vmwc = make_signed_byte(temp_center);
		offsetr_cent |= vmwc << (8 * byte_lane);
	}

	dmc_set_read_offset_value(phy_ctrl, offsetr_cent);
}
/**
 * ddr3_mem_ctrl_init() - Initialise the DDR3 memory controllers (Exynos5420).
 *
 * Brings up both DREX channels and their PHYs in the strict hardware
 * order: clock bypass/mux setup, PHY mode and timing programming, ZQ
 * calibration, DFI initialisation, address map, AC timings, then the
 * DRAM init commands, optional hardware gate leveling and software read
 * leveling.
 *
 * @mem:	memory timings and configuration values to program
 * @reset:	non-zero on cold boot (DRAM may be reset via MRS); zero on
 *		resume, where DRAM contents must be preserved and the
 *		calibrated read offsets are restored from PMU spare regs
 * Return: 0 on success, SETUP_ERR_* error code on failure.
 */
int ddr3_mem_ctrl_init(struct mem_timings *mem, int reset)
{
	struct exynos5420_clock *clk =
		(struct exynos5420_clock *)samsung_get_base_clock();
	struct exynos5420_power *power =
		(struct exynos5420_power *)samsung_get_base_power();
	struct exynos5420_phy_control *phy0_ctrl, *phy1_ctrl;
	struct exynos5420_dmc *drex0, *drex1;
	struct exynos5420_tzasc *tzasc0, *tzasc1;
	struct exynos5_power *pmu;
	uint32_t val, n_lock_r, n_lock_w_phy0, n_lock_w_phy1;
	uint32_t lock0_info, lock1_info;
	int chip;
	int i;

	phy0_ctrl = (struct exynos5420_phy_control *)samsung_get_base_dmc_phy();
	phy1_ctrl = (struct exynos5420_phy_control *)(samsung_get_base_dmc_phy()
							+ DMC_OFFSET);
	drex0 = (struct exynos5420_dmc *)samsung_get_base_dmc_ctrl();
	drex1 = (struct exynos5420_dmc *)(samsung_get_base_dmc_ctrl()
							+ DMC_OFFSET);
	tzasc0 = (struct exynos5420_tzasc *)samsung_get_base_dmc_tzasc();
	tzasc1 = (struct exynos5420_tzasc *)(samsung_get_base_dmc_tzasc()
							+ DMC_OFFSET);
	pmu = (struct exynos5_power *)EXYNOS5420_POWER_BASE;

	/* Enable PAUSE for DREX */
	setbits_le32(&clk->pause, ENABLE_BIT);

	/* Enable BYPASS mode */
	setbits_le32(&clk->bpll_con1, BYPASS_EN);

	/* Switch CDREX to FOUTBPLL and wait for the mux to settle */
	writel(MUX_BPLL_SEL_FOUTBPLL, &clk->src_cdrex);
	do {
		val = readl(&clk->mux_stat_cdrex);
		val &= BPLL_SEL_MASK;
	} while (val != FOUTBPLL);

	clrbits_le32(&clk->bpll_con1, BYPASS_EN);

	/* Specify the DDR memory type as DDR3 */
	val = readl(&phy0_ctrl->phy_con0);
	val &= ~(PHY_CON0_CTRL_DDR_MODE_MASK << PHY_CON0_CTRL_DDR_MODE_SHIFT);
	val |= (DDR_MODE_DDR3 << PHY_CON0_CTRL_DDR_MODE_SHIFT);
	writel(val, &phy0_ctrl->phy_con0);

	val = readl(&phy1_ctrl->phy_con0);
	val &= ~(PHY_CON0_CTRL_DDR_MODE_MASK << PHY_CON0_CTRL_DDR_MODE_SHIFT);
	val |= (DDR_MODE_DDR3 << PHY_CON0_CTRL_DDR_MODE_SHIFT);
	writel(val, &phy1_ctrl->phy_con0);

	/* Set Read Latency and Burst Length for PHY0 and PHY1 */
	val = (mem->ctrl_bstlen << PHY_CON42_CTRL_BSTLEN_SHIFT) |
		(mem->ctrl_rdlat << PHY_CON42_CTRL_RDLAT_SHIFT);
	writel(val, &phy0_ctrl->phy_con42);
	writel(val, &phy1_ctrl->phy_con42);

	/* Program the DDR3 write-data enable timing in PHY_CON26 */
	val = readl(&phy0_ctrl->phy_con26);
	val &= ~(T_WRDATA_EN_MASK << T_WRDATA_EN_OFFSET);
	val |= (T_WRDATA_EN_DDR3 << T_WRDATA_EN_OFFSET);
	writel(val, &phy0_ctrl->phy_con26);

	val = readl(&phy1_ctrl->phy_con26);
	val &= ~(T_WRDATA_EN_MASK << T_WRDATA_EN_OFFSET);
	val |= (T_WRDATA_EN_DDR3 << T_WRDATA_EN_OFFSET);
	writel(val, &phy1_ctrl->phy_con26);

	/*
	 * Set Driver strength for CK, CKE, CS & CA to 0x7
	 * Set Driver strength for Data Slice 0~3 to 0x7
	 */
	val = (0x7 << CA_CK_DRVR_DS_OFFSET) | (0x7 << CA_CKE_DRVR_DS_OFFSET) |
		(0x7 << CA_CS_DRVR_DS_OFFSET) | (0x7 << CA_ADR_DRVR_DS_OFFSET);
	val |= (0x7 << DA_3_DS_OFFSET) | (0x7 << DA_2_DS_OFFSET) |
		(0x7 << DA_1_DS_OFFSET) | (0x7 << DA_0_DS_OFFSET);
	writel(val, &phy0_ctrl->phy_con39);
	writel(val, &phy1_ctrl->phy_con39);

	/* ZQ Calibration */
	if (dmc_config_zq(mem, &phy0_ctrl->phy_con16, &phy1_ctrl->phy_con16,
			  &phy0_ctrl->phy_con17, &phy1_ctrl->phy_con17))
		return SETUP_ERR_ZQ_CALIBRATION_FAILURE;

	clrbits_le32(&phy0_ctrl->phy_con16, ZQ_CLK_DIV_EN);
	clrbits_le32(&phy1_ctrl->phy_con16, ZQ_CLK_DIV_EN);

	/* DQ Signal */
	val = readl(&phy0_ctrl->phy_con14);
	val |= mem->phy0_pulld_dqs;
	writel(val, &phy0_ctrl->phy_con14);
	val = readl(&phy1_ctrl->phy_con14);
	val |= mem->phy1_pulld_dqs;
	writel(val, &phy1_ctrl->phy_con14);

	val = MEM_TERM_EN | PHY_TERM_EN;
	writel(val, &drex0->phycontrol0);
	writel(val, &drex1->phycontrol0);

	/* Start DFI initialisation on both channels */
	writel(mem->concontrol |
		(mem->dfi_init_start << CONCONTROL_DFI_INIT_START_SHIFT) |
		(mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT),
		&drex0->concontrol);
	writel(mem->concontrol |
		(mem->dfi_init_start << CONCONTROL_DFI_INIT_START_SHIFT) |
		(mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT),
		&drex1->concontrol);

	/* Busy-wait for DFI init complete on each channel */
	do {
		val = readl(&drex0->phystatus);
	} while ((val & DFI_INIT_COMPLETE) != DFI_INIT_COMPLETE);
	do {
		val = readl(&drex1->phystatus);
	} while ((val & DFI_INIT_COMPLETE) != DFI_INIT_COMPLETE);

	clrbits_le32(&drex0->concontrol, DFI_INIT_START);
	clrbits_le32(&drex1->concontrol, DFI_INIT_START);

	update_reset_dll(&drex0->phycontrol0, DDR_MODE_DDR3);
	update_reset_dll(&drex1->phycontrol0, DDR_MODE_DDR3);

	/*
	 * Set Base Address:
	 * 0x2000_0000 ~ 0x5FFF_FFFF
	 * 0x6000_0000 ~ 0x9FFF_FFFF
	 */
	/* MEMBASECONFIG0 */
	val = DMC_MEMBASECONFIGX_CHIP_BASE(DMC_CHIP_BASE_0) |
		DMC_MEMBASECONFIGX_CHIP_MASK(DMC_CHIP_MASK);
	writel(val, &tzasc0->membaseconfig0);
	writel(val, &tzasc1->membaseconfig0);

	/* MEMBASECONFIG1 */
	val = DMC_MEMBASECONFIGX_CHIP_BASE(DMC_CHIP_BASE_1) |
		DMC_MEMBASECONFIGX_CHIP_MASK(DMC_CHIP_MASK);
	writel(val, &tzasc0->membaseconfig1);
	writel(val, &tzasc1->membaseconfig1);

	/*
	 * Memory Channel Interleaving Size
	 * Ares Channel interleaving = 128 bytes
	 */
	/* MEMCONFIG0/1 */
	writel(mem->memconfig, &tzasc0->memconfig0);
	writel(mem->memconfig, &tzasc1->memconfig0);
	writel(mem->memconfig, &tzasc0->memconfig1);
	writel(mem->memconfig, &tzasc1->memconfig1);

	/* Precharge Configuration */
	writel(mem->prechconfig_tp_cnt << PRECHCONFIG_TP_CNT_SHIFT,
	       &drex0->prechconfig0);
	writel(mem->prechconfig_tp_cnt << PRECHCONFIG_TP_CNT_SHIFT,
	       &drex1->prechconfig0);

	/*
	 * TimingRow, TimingData, TimingPower and Timingaref
	 * values as per Memory AC parameters
	 */
	writel(mem->timing_ref, &drex0->timingref);
	writel(mem->timing_ref, &drex1->timingref);
	writel(mem->timing_row, &drex0->timingrow0);
	writel(mem->timing_row, &drex1->timingrow0);
	writel(mem->timing_data, &drex0->timingdata0);
	writel(mem->timing_data, &drex1->timingdata0);
	writel(mem->timing_power, &drex0->timingpower0);
	writel(mem->timing_power, &drex1->timingpower0);

	if (reset) {
		/*
		 * Send NOP, MRS and ZQINIT commands
		 * Sending MRS command will reset the DRAM. We should not be
		 * reseting the DRAM after resume, this will lead to memory
		 * corruption as DRAM content is lost after DRAM reset
		 */
		dmc_config_mrs(mem, &drex0->directcmd);
		dmc_config_mrs(mem, &drex1->directcmd);
	}

	/*
	 * Get PHY_CON13 from both phys. Gate CLKM around reading since
	 * PHY_CON13 is glitchy when CLKM is running. We're paranoid and
	 * wait until we get a "fine lock", though a coarse lock is probably
	 * OK (we only use the coarse numbers below). We try to gate the
	 * clock for as short a time as possible in case SDRAM is somehow
	 * sensitive. sdelay(10) in the loop is arbitrary to make sure
	 * there is some time for PHY_CON13 to get updated. In practice
	 * no delay appears to be needed.
	 */
	val = readl(&clk->gate_bus_cdrex);
	while (true) {
		writel(val & ~0x1, &clk->gate_bus_cdrex);
		lock0_info = readl(&phy0_ctrl->phy_con13);
		writel(val, &clk->gate_bus_cdrex);
		if ((lock0_info & CTRL_FINE_LOCKED) == CTRL_FINE_LOCKED)
			break;
		sdelay(10);
	}
	while (true) {
		writel(val & ~0x2, &clk->gate_bus_cdrex);
		lock1_info = readl(&phy1_ctrl->phy_con13);
		writel(val, &clk->gate_bus_cdrex);
		if ((lock1_info & CTRL_FINE_LOCKED) == CTRL_FINE_LOCKED)
			break;
		sdelay(10);
	}

	if (!reset) {
		/*
		 * During Suspend-Resume & S/W-Reset, as soon as PMU releases
		 * pad retention, CKE goes high. This causes memory contents
		 * not to be retained during DRAM initialization. Therfore,
		 * there is a new control register(0x100431e8[28]) which lets us
		 * release pad retention and retain the memory content until the
		 * initialization is complete.
		 */
		writel(PAD_RETENTION_DRAM_COREBLK_VAL,
		       &power->pad_retention_dram_coreblk_option);
		do {
			val = readl(&power->pad_retention_dram_status);
		} while (val != 0x1);

		/*
		 * CKE PAD retention disables DRAM self-refresh mode.
		 * Send auto refresh command for DRAM refresh.
		 */
		for (i = 0; i < 128; i++) {
			for (chip = 0; chip < mem->chips_to_configure; chip++) {
				writel(DIRECT_CMD_REFA |
				       (chip << DIRECT_CMD_CHIP_SHIFT),
				       &drex0->directcmd);
				writel(DIRECT_CMD_REFA |
				       (chip << DIRECT_CMD_CHIP_SHIFT),
				       &drex1->directcmd);
			}
		}
	}

	if (mem->gate_leveling_enable) {
		/*
		 * Hardware read/gate leveling sequence; the register write
		 * order below is mandated by the PHY and must not change.
		 */
		writel(PHY_CON0_RESET_VAL, &phy0_ctrl->phy_con0);
		writel(PHY_CON0_RESET_VAL, &phy1_ctrl->phy_con0);

		setbits_le32(&phy0_ctrl->phy_con0, P0_CMD_EN);
		setbits_le32(&phy1_ctrl->phy_con0, P0_CMD_EN);

		val = PHY_CON2_RESET_VAL;
		val |= INIT_DESKEW_EN;
		writel(val, &phy0_ctrl->phy_con2);
		writel(val, &phy1_ctrl->phy_con2);

		val = readl(&phy0_ctrl->phy_con1);
		val |= (RDLVL_PASS_ADJ_VAL << RDLVL_PASS_ADJ_OFFSET);
		writel(val, &phy0_ctrl->phy_con1);

		val = readl(&phy1_ctrl->phy_con1);
		val |= (RDLVL_PASS_ADJ_VAL << RDLVL_PASS_ADJ_OFFSET);
		writel(val, &phy1_ctrl->phy_con1);

		/* Seed PHY_CON12 from the coarse lock value, DLL off */
		n_lock_w_phy0 = (lock0_info & CTRL_LOCK_COARSE_MASK) >> 2;
		n_lock_r = readl(&phy0_ctrl->phy_con12);
		n_lock_r &= ~CTRL_DLL_ON;
		n_lock_r |= n_lock_w_phy0;
		writel(n_lock_r, &phy0_ctrl->phy_con12);

		n_lock_w_phy1 = (lock1_info & CTRL_LOCK_COARSE_MASK) >> 2;
		n_lock_r = readl(&phy1_ctrl->phy_con12);
		n_lock_r &= ~CTRL_DLL_ON;
		n_lock_r |= n_lock_w_phy1;
		writel(n_lock_r, &phy1_ctrl->phy_con12);

		/*
		 * Direct command 0x4 with bank bits 0x3 to each configured
		 * chip — NOTE(review): presumably an MRS for leveling entry;
		 * verify against the DREX manual.
		 */
		val = (0x3 << DIRECT_CMD_BANK_SHIFT) | 0x4;
		for (chip = 0; chip < mem->chips_to_configure; chip++) {
			writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
			       &drex0->directcmd);
			writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
			       &drex1->directcmd);
		}

		setbits_le32(&phy0_ctrl->phy_con2, RDLVL_GATE_EN);
		setbits_le32(&phy1_ctrl->phy_con2, RDLVL_GATE_EN);

		setbits_le32(&phy0_ctrl->phy_con0, CTRL_SHGATE);
		setbits_le32(&phy1_ctrl->phy_con0, CTRL_SHGATE);

		val = readl(&phy0_ctrl->phy_con1);
		val &= ~(CTRL_GATEDURADJ_MASK);
		writel(val, &phy0_ctrl->phy_con1);

		val = readl(&phy1_ctrl->phy_con1);
		val &= ~(CTRL_GATEDURADJ_MASK);
		writel(val, &phy1_ctrl->phy_con1);

		/* Poll channel 0 for read-leveling completion */
		writel(CTRL_RDLVL_GATE_ENABLE, &drex0->rdlvl_config);
		i = TIMEOUT_US;
		while (((readl(&drex0->phystatus) & RDLVL_COMPLETE_CHO) !=
			RDLVL_COMPLETE_CHO) && (i > 0)) {
			/*
			 * TODO(waihong): Comment on how long this take to
			 * timeout
			 */
			sdelay(100);
			i--;
		}
		if (!i)
			return SETUP_ERR_RDLV_COMPLETE_TIMEOUT;
		writel(CTRL_RDLVL_GATE_DISABLE, &drex0->rdlvl_config);

		/* Poll channel 1 for read-leveling completion */
		writel(CTRL_RDLVL_GATE_ENABLE, &drex1->rdlvl_config);
		i = TIMEOUT_US;
		while (((readl(&drex1->phystatus) & RDLVL_COMPLETE_CHO) !=
			RDLVL_COMPLETE_CHO) && (i > 0)) {
			/*
			 * TODO(waihong): Comment on how long this take to
			 * timeout
			 */
			sdelay(100);
			i--;
		}
		if (!i)
			return SETUP_ERR_RDLV_COMPLETE_TIMEOUT;
		writel(CTRL_RDLVL_GATE_DISABLE, &drex1->rdlvl_config);

		/* Leveling done: drop the DQS pull-downs */
		writel(0, &phy0_ctrl->phy_con14);
		writel(0, &phy1_ctrl->phy_con14);

		val = (0x3 << DIRECT_CMD_BANK_SHIFT);
		for (chip = 0; chip < mem->chips_to_configure; chip++) {
			writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
			       &drex0->directcmd);
			writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
			       &drex1->directcmd);
		}

		/* Common Settings for Leveling */
		val = PHY_CON12_RESET_VAL;
		writel((val + n_lock_w_phy0), &phy0_ctrl->phy_con12);
		writel((val + n_lock_w_phy1), &phy1_ctrl->phy_con12);

		setbits_le32(&phy0_ctrl->phy_con2, DLL_DESKEW_EN);
		setbits_le32(&phy1_ctrl->phy_con2, DLL_DESKEW_EN);
	}

	/*
	 * Do software read leveling
	 *
	 * Do this before we turn on auto refresh since the auto refresh can
	 * be in conflict with the resync operation that's part of setting
	 * read leveling.
	 */
	if (!reset) {
		/* restore calibrated value after resume */
		dmc_set_read_offset_value(phy0_ctrl, readl(&pmu->pmu_spare1));
		dmc_set_read_offset_value(phy1_ctrl, readl(&pmu->pmu_spare2));
	} else {
		software_find_read_offset(phy0_ctrl, 0,
					  CTRL_LOCK_COARSE(lock0_info));
		software_find_read_offset(phy1_ctrl, 1,
					  CTRL_LOCK_COARSE(lock1_info));
		/* save calibrated value to restore after resume */
		writel(dmc_get_read_offset_value(phy0_ctrl), &pmu->pmu_spare1);
		writel(dmc_get_read_offset_value(phy1_ctrl), &pmu->pmu_spare2);
	}

	/* Send PALL command */
	dmc_config_prech(mem, &drex0->directcmd);
	dmc_config_prech(mem, &drex1->directcmd);

	writel(mem->memcontrol, &drex0->memcontrol);
	writel(mem->memcontrol, &drex1->memcontrol);

	/*
	 * Set DMC Concontrol: Enable auto-refresh counter, provide
	 * read data fetch cycles and enable DREX auto set powerdown
	 * for input buffer of I/O in none read memory state.
	 */
	writel(mem->concontrol | (mem->aref_en << CONCONTROL_AREF_EN_SHIFT) |
		(mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT)|
		DMC_CONCONTROL_IO_PD_CON(0x2),
		&drex0->concontrol);
	writel(mem->concontrol | (mem->aref_en << CONCONTROL_AREF_EN_SHIFT) |
		(mem->rd_fetch << CONCONTROL_RD_FETCH_SHIFT)|
		DMC_CONCONTROL_IO_PD_CON(0x2),
		&drex1->concontrol);

	/*
	 * Enable Clock Gating Control for DMC
	 * this saves around 25 mw dmc power as compared to the power
	 * consumption without these bits enabled
	 */
	setbits_le32(&drex0->cgcontrol, DMC_INTERNAL_CG);
	setbits_le32(&drex1->cgcontrol, DMC_INTERNAL_CG);

	return 0;
}
  714. #endif