  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2016-2018 Intel Corporation <www.intel.com>
  4. *
  5. */
  6. #include <common.h>
  7. #include <errno.h>
  8. #include <div64.h>
  9. #include <asm/io.h>
  10. #include <wait_bit.h>
  11. #include <asm/arch/firewall_s10.h>
  12. #include <asm/arch/sdram_s10.h>
  13. #include <asm/arch/system_manager.h>
  14. #include <asm/arch/reset_manager.h>
DECLARE_GLOBAL_DATA_PTR;

/* System manager block; used here only to poll the HMC clock status. */
static const struct socfpga_system_manager *sysmgr_regs =
		(void *)SOCFPGA_SYSMGR_ADDRESS;

/*
 * Pack one supported DDR geometry into a single word:
 * bits [31:24] = address order (A), [23:16] = bank width (B),
 * bits [15:8]  = column width (C),  [7:0]  = row width (R).
 */
#define DDR_CONFIG(A, B, C, R)	(((A) << 24) | ((B) << 16) | ((C) << 8) | (R))
/* The following are the supported configurations */
u32 ddr_config[] = {
	/* DDR_CONFIG(Address order,Bank,Column,Row) */
	/* List for DDR3 or LPDDR3 (pinout order > chip, row, bank, column) */
	DDR_CONFIG(0, 3, 10, 12),
	DDR_CONFIG(0, 3, 9, 13),
	DDR_CONFIG(0, 3, 10, 13),
	DDR_CONFIG(0, 3, 9, 14),
	DDR_CONFIG(0, 3, 10, 14),
	DDR_CONFIG(0, 3, 10, 15),
	DDR_CONFIG(0, 3, 11, 14),
	DDR_CONFIG(0, 3, 11, 15),
	DDR_CONFIG(0, 3, 10, 16),
	DDR_CONFIG(0, 3, 11, 16),
	DDR_CONFIG(0, 3, 12, 15), /* 0xa */
	/* List for DDR4 only (pinout order > chip, bank, row, column) */
	DDR_CONFIG(1, 3, 10, 14),
	DDR_CONFIG(1, 4, 10, 14),
	DDR_CONFIG(1, 3, 10, 15),
	DDR_CONFIG(1, 4, 10, 15),
	DDR_CONFIG(1, 3, 10, 16),
	DDR_CONFIG(1, 4, 10, 16),
	DDR_CONFIG(1, 3, 10, 17),
	DDR_CONFIG(1, 4, 10, 17),
};
  44. static u32 hmc_readl(u32 reg)
  45. {
  46. return readl(((void __iomem *)SOCFPGA_HMC_MMR_IO48_ADDRESS + (reg)));
  47. }
  48. static u32 hmc_ecc_readl(u32 reg)
  49. {
  50. return readl((void __iomem *)SOCFPGA_SDR_ADDRESS + (reg));
  51. }
  52. static u32 hmc_ecc_writel(u32 data, u32 reg)
  53. {
  54. return writel(data, (void __iomem *)SOCFPGA_SDR_ADDRESS + (reg));
  55. }
  56. static u32 ddr_sch_writel(u32 data, u32 reg)
  57. {
  58. return writel(data,
  59. (void __iomem *)SOCFPGA_SDR_SCHEDULER_ADDRESS + (reg));
  60. }
  61. int match_ddr_conf(u32 ddr_conf)
  62. {
  63. int i;
  64. for (i = 0; i < ARRAY_SIZE(ddr_config); i++) {
  65. if (ddr_conf == ddr_config[i])
  66. return i;
  67. }
  68. return 0;
  69. }
  70. static int emif_clear(void)
  71. {
  72. hmc_ecc_writel(0, RSTHANDSHAKECTRL);
  73. return wait_for_bit_le32((const void *)(SOCFPGA_SDR_ADDRESS +
  74. RSTHANDSHAKESTAT),
  75. DDR_HMC_RSTHANDSHAKE_MASK,
  76. false, 1000, false);
  77. }
  78. static int emif_reset(void)
  79. {
  80. u32 c2s, s2c, ret;
  81. c2s = hmc_ecc_readl(RSTHANDSHAKECTRL) & DDR_HMC_RSTHANDSHAKE_MASK;
  82. s2c = hmc_ecc_readl(RSTHANDSHAKESTAT) & DDR_HMC_RSTHANDSHAKE_MASK;
  83. debug("DDR: c2s=%08x s2c=%08x nr0=%08x nr1=%08x nr2=%08x dst=%08x\n",
  84. c2s, s2c, hmc_readl(NIOSRESERVED0), hmc_readl(NIOSRESERVED1),
  85. hmc_readl(NIOSRESERVED2), hmc_readl(DRAMSTS));
  86. if (s2c && emif_clear()) {
  87. printf("DDR: emif_clear() failed\n");
  88. return -1;
  89. }
  90. debug("DDR: Triggerring emif reset\n");
  91. hmc_ecc_writel(DDR_HMC_CORE2SEQ_INT_REQ, RSTHANDSHAKECTRL);
  92. /* if seq2core[3] = 0, we are good */
  93. ret = wait_for_bit_le32((const void *)(SOCFPGA_SDR_ADDRESS +
  94. RSTHANDSHAKESTAT),
  95. DDR_HMC_SEQ2CORE_INT_RESP_MASK,
  96. false, 1000, false);
  97. if (ret) {
  98. printf("DDR: failed to get ack from EMIF\n");
  99. return ret;
  100. }
  101. ret = emif_clear();
  102. if (ret) {
  103. printf("DDR: emif_clear() failed\n");
  104. return ret;
  105. }
  106. debug("DDR: %s triggered successly\n", __func__);
  107. return 0;
  108. }
  109. static int poll_hmc_clock_status(void)
  110. {
  111. return wait_for_bit_le32(&sysmgr_regs->hmc_clk,
  112. SYSMGR_HMC_CLK_STATUS_MSK, true, 1000, false);
  113. }
/**
 * sdram_mmr_init_full() - Function to initialize SDRAM MMR
 * @unused: unused parameter, kept for the platform init interface
 *
 * Initialize the SDRAM MMR: open CCU/firewall access to DDR, wait for
 * calibration, then program the HMC and DDR scheduler from the
 * calibrated timing registers, and finally configure ECC.
 *
 * Return: 0 on success, -1 if the HMC clock is not running or SDRAM
 * calibration fails.
 */
int sdram_mmr_init_full(unsigned int unused)
{
	u32 update_value, io48_value, ddrioctl;
	u32 i;
	int ret;

	/* Enable access to DDR from CPU master */
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_DDRREG),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE0),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1A),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1B),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1C),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1D),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_CPU0_MPRT_ADBASE_MEMSPACE1E),
		     CCU_ADBASE_DI_MASK);

	/* Enable access to DDR from IO master */
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE0),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1A),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1B),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1C),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1D),
		     CCU_ADBASE_DI_MASK);
	clrbits_le32(CCU_REG_ADDR(CCU_IOM_MPRT_ADBASE_MEMSPACE1E),
		     CCU_ADBASE_DI_MASK);

	/* this enables nonsecure access to DDR */
	/* mpuregion0addr_limit */
	FW_MPU_DDR_SCR_WRITEL(0xFFFF0000, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMIT);
	FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_MPUREGION0ADDR_LIMITEXT);

	/* nonmpuregion0addr_limit */
	FW_MPU_DDR_SCR_WRITEL(0xFFFF0000,
			      FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMIT);
	FW_MPU_DDR_SCR_WRITEL(0x1F, FW_MPU_DDR_SCR_NONMPUREGION0ADDR_LIMITEXT);

	/* Enable mpuregion0enable and nonmpuregion0enable */
	FW_MPU_DDR_SCR_WRITEL(MPUREGION0_ENABLE | NONMPUREGION0_ENABLE,
			      FW_MPU_DDR_SCR_EN_SET);

	/* Ensure HMC clock is running */
	if (poll_hmc_clock_status()) {
		puts("DDR: Error as HMC clock not running\n");
		return -1;
	}

	/* release DDR scheduler from reset */
	socfpga_per_reset(SOCFPGA_RESET(SDR), 0);

	/* Try 3 times to do a calibration */
	for (i = 0; i < 3; i++) {
		ret = wait_for_bit_le32((const void *)(SOCFPGA_SDR_ADDRESS +
						       DDRCALSTAT),
					DDR_HMC_DDRCALSTAT_CAL_MSK, true, 1000,
					false);
		if (!ret)
			break;
		/* Calibration timed out: reset the EMIF, then retry */
		emif_reset();
	}

	if (ret) {
		puts("DDR: Error as SDRAM calibration failed\n");
		return -1;
	}
	debug("DDR: Calibration success\n");

	/* Snapshot controller configuration and calibrated timings */
	u32 ctrlcfg0 = hmc_readl(CTRLCFG0);
	u32 ctrlcfg1 = hmc_readl(CTRLCFG1);
	u32 dramaddrw = hmc_readl(DRAMADDRW);
	u32 dramtim0 = hmc_readl(DRAMTIMING0);
	u32 caltim0 = hmc_readl(CALTIMING0);
	u32 caltim1 = hmc_readl(CALTIMING1);
	u32 caltim2 = hmc_readl(CALTIMING2);
	u32 caltim3 = hmc_readl(CALTIMING3);
	u32 caltim4 = hmc_readl(CALTIMING4);
	u32 caltim9 = hmc_readl(CALTIMING9);

	/*
	 * Configure the DDR IO size [0xFFCFB008]
	 * niosreserve0: Used to indicate DDR width &
	 *   bit[7:0] = Number of data bits (bit[6:5] 0x01=32bit, 0x10=64bit)
	 *   bit[8]   = 1 if user-mode OCT is present
	 *   bit[9]   = 1 if warm reset compiled into EMIF Cal Code
	 *   bit[10]  = 1 if warm reset is on during generation in EMIF Cal
	 * niosreserve1: IP ADCDS version encoded as 16 bit value
	 *   bit[2:0]   = Variant (0=not special,1=FAE beta, 2=Customer beta,
	 *                3=EAP, 4-6 are reserved)
	 *   bit[5:3]   = Service Pack # (e.g. 1)
	 *   bit[9:6]   = Minor Release #
	 *   bit[14:10] = Major Release #
	 */
	update_value = hmc_readl(NIOSRESERVED0);
	/* >> 5 extracts the width bits [6:5] described above into DDRIOCTRL */
	hmc_ecc_writel(((update_value & 0xFF) >> 5), DDRIOCTRL);
	ddrioctl = hmc_ecc_readl(DDRIOCTRL);

	/* enable HPS interface to HMC */
	hmc_ecc_writel(DDR_HMC_HPSINTFCSEL_ENABLE_MASK, HPSINTFCSEL);

	/* Set the DDR Configuration */
	io48_value = DDR_CONFIG(CTRLCFG1_CFG_ADDR_ORDER(ctrlcfg1),
				(DRAMADDRW_CFG_BANK_ADDR_WIDTH(dramaddrw) +
				 DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(dramaddrw)),
				DRAMADDRW_CFG_COL_ADDR_WIDTH(dramaddrw),
				DRAMADDRW_CFG_ROW_ADDR_WIDTH(dramaddrw));

	/*
	 * NOTE(review): match_ddr_conf() returns 0 both for "no match" and
	 * for table index 0, so a geometry matching entry 0 leaves DDRCONF
	 * at its reset default — confirm that default corresponds to entry 0.
	 */
	update_value = match_ddr_conf(io48_value);
	if (update_value)
		ddr_sch_writel(update_value, DDR_SCH_DDRCONF);

	/* Configure HMC dramaddrw */
	hmc_ecc_writel(hmc_readl(DRAMADDRW), DRAMADDRWIDTH);

	/*
	 * Configure DDR timing
	 *  RDTOMISS = tRTP + tRP + tRCD - BL/2
	 *  WRTOMISS = WL + tWR + tRP + tRCD and
	 *  WL = RL + BL/2 + 2 - rd-to-wr ; tWR = 15ns  so...
	 *  First part of equation is in memory clock units so divide by 2
	 *  for HMC clock units. 1066MHz is close to 1ns so use 15 directly.
	 *  WRTOMISS = ((RL + BL/2 + 2 + tWR) >> 1)- rd-to-wr + tRP + tRCD
	 */
	u32 burst_len = CTRLCFG0_CFG_CTRL_BURST_LEN(ctrlcfg0);
	/* RDTOMISS, per the formula above (burst_len >> 2 == BL/2 in HMC clocks) */
	update_value = CALTIMING2_CFG_RD_TO_WR_PCH(caltim2) +
		       CALTIMING4_CFG_PCH_TO_VALID(caltim4) +
		       CALTIMING0_CFG_ACT_TO_RDWR(caltim0) -
		       (burst_len >> 2);
	/* WRTOMISS, per the formula above */
	io48_value = (((DRAMTIMING0_CFG_TCL(dramtim0) + 2 + DDR_TWR +
			(burst_len >> 1)) >> 1) -
		      /* Up to here was in memory cycles so divide by 2 */
		      CALTIMING1_CFG_RD_TO_WR(caltim1) +
		      CALTIMING0_CFG_ACT_TO_RDWR(caltim0) +
		      CALTIMING4_CFG_PCH_TO_VALID(caltim4));

	ddr_sch_writel(((CALTIMING0_CFG_ACT_TO_ACT(caltim0) <<
			 DDR_SCH_DDRTIMING_ACTTOACT_OFF) |
			(update_value << DDR_SCH_DDRTIMING_RDTOMISS_OFF) |
			(io48_value << DDR_SCH_DDRTIMING_WRTOMISS_OFF) |
			((burst_len >> 2) << DDR_SCH_DDRTIMING_BURSTLEN_OFF) |
			(CALTIMING1_CFG_RD_TO_WR(caltim1) <<
			 DDR_SCH_DDRTIMING_RDTOWR_OFF) |
			(CALTIMING3_CFG_WR_TO_RD(caltim3) <<
			 DDR_SCH_DDRTIMING_WRTORD_OFF) |
			(((ddrioctl == 1) ? 1 : 0) <<
			 DDR_SCH_DDRTIMING_BWRATIO_OFF)),
		       DDR_SCH_DDRTIMING);

	/* Configure DDR mode [precharge = 0] */
	ddr_sch_writel(((ddrioctl ? 0 : 1) <<
			DDR_SCH_DDRMOD_BWRATIOEXTENDED_OFF),
		       DDR_SCH_DDRMODE);

	/* Configure the read latency */
	ddr_sch_writel((DRAMTIMING0_CFG_TCL(dramtim0) >> 1) +
		       DDR_READ_LATENCY_DELAY,
		       DDR_SCH_READ_LATENCY);

	/*
	 * Configuring timing values concerning activate commands
	 * [FAWBANK alway 1 because always 4 bank DDR]
	 */
	ddr_sch_writel(((CALTIMING0_CFG_ACT_TO_ACT_DB(caltim0) <<
			 DDR_SCH_ACTIVATE_RRD_OFF) |
			(CALTIMING9_CFG_4_ACT_TO_ACT(caltim9) <<
			 DDR_SCH_ACTIVATE_FAW_OFF) |
			(DDR_ACTIVATE_FAWBANK <<
			 DDR_SCH_ACTIVATE_FAWBANK_OFF)),
		       DDR_SCH_ACTIVATE);

	/*
	 * Configuring timing values concerning device to device data bus
	 * ownership change
	 */
	ddr_sch_writel(((CALTIMING1_CFG_RD_TO_RD_DC(caltim1) <<
			 DDR_SCH_DEVTODEV_BUSRDTORD_OFF) |
			(CALTIMING1_CFG_RD_TO_WR_DC(caltim1) <<
			 DDR_SCH_DEVTODEV_BUSRDTOWR_OFF) |
			(CALTIMING3_CFG_WR_TO_RD_DC(caltim3) <<
			 DDR_SCH_DEVTODEV_BUSWRTORD_OFF)),
		       DDR_SCH_DEVTODEV);

	/* assigning the SDRAM size */
	unsigned long long size = sdram_calculate_size();
	/*
	 * If the size is invalid, use default Config size.
	 * NOTE(review): size is unsigned, so "<= 0" only catches 0.
	 */
	if (size <= 0)
		gd->ram_size = PHYS_SDRAM_1_SIZE;
	else
		gd->ram_size = size;

	/* Enable or disable the SDRAM ECC */
	if (CTRLCFG1_CFG_CTRL_EN_ECC(ctrlcfg1)) {
		setbits_le32(SOCFPGA_SDR_ADDRESS + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		/* Release the counter resets but leave ECC enabled */
		clrbits_le32(SOCFPGA_SDR_ADDRESS + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK));
		setbits_le32(SOCFPGA_SDR_ADDRESS + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
	} else {
		clrbits_le32(SOCFPGA_SDR_ADDRESS + ECCCTRL1,
			     (DDR_HMC_ECCCTL_AWB_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_CNT_RST_SET_MSK |
			      DDR_HMC_ECCCTL_ECC_EN_SET_MSK));
		clrbits_le32(SOCFPGA_SDR_ADDRESS + ECCCTRL2,
			     (DDR_HMC_ECCCTL2_RMW_EN_SET_MSK |
			      DDR_HMC_ECCCTL2_AWB_EN_SET_MSK));
	}

	debug("DDR: HMC init success\n");
	return 0;
}
  318. /**
  319. * sdram_calculate_size() - Calculate SDRAM size
  320. *
  321. * Calculate SDRAM device size based on SDRAM controller parameters.
  322. * Size is specified in bytes.
  323. */
  324. phys_size_t sdram_calculate_size(void)
  325. {
  326. u32 dramaddrw = hmc_readl(DRAMADDRW);
  327. phys_size_t size = 1 << (DRAMADDRW_CFG_CS_ADDR_WIDTH(dramaddrw) +
  328. DRAMADDRW_CFG_BANK_GRP_ADDR_WIDTH(dramaddrw) +
  329. DRAMADDRW_CFG_BANK_ADDR_WIDTH(dramaddrw) +
  330. DRAMADDRW_CFG_ROW_ADDR_WIDTH(dramaddrw) +
  331. DRAMADDRW_CFG_COL_ADDR_WIDTH(dramaddrw));
  332. size *= (2 << (hmc_ecc_readl(DDRIOCTRL) &
  333. DDR_HMC_DDRIOCTRL_IOSIZE_MSK));
  334. return size;
  335. }