mrc.c

/*
 * Copyright (C) 2013, Intel Corporation
 * Copyright (C) 2015, Bin Meng <bmeng.cn@gmail.com>
 *
 * Ported from Intel released Quark UEFI BIOS
 * QuarkSocPkg/QuarkNorthCluster/MemoryInit/Pei
 *
 * SPDX-License-Identifier: Intel
 */

/*
 * This is the main Quark Memory Reference Code (MRC)
 *
 * These functions are generic and should work for any Quark-based board.
 *
 * MRC requires two data structures to be passed in which are initialized by
 * mrc_adjust_params().
 *
 * The basic flow is as follows:
 * 01) Check for supported DDR speed configuration
 * 02) Set up Memory Manager buffer as pass-through (POR)
 * 03) Set Channel Interleaving Mode and Channel Stride to the most aggressive
 *     setting possible
 * 04) Set up the Memory Controller logic
 * 05) Set up the DDR_PHY logic
 * 06) Initialise the DRAMs (JEDEC)
 * 07) Perform the Receive Enable Calibration algorithm
 * 08) Perform the Write Leveling algorithm
 * 09) Perform the Read Training algorithm (includes internal Vref)
 * 10) Perform the Write Training algorithm
 * 11) Set Channel Interleaving Mode and Channel Stride to the desired settings
 *
 * DRAM unit configuration based on Valleyview MRC.
 */

#include <common.h>
#include <asm/arch/mrc.h>
#include <asm/arch/msg_port.h>
#include "mrc_util.h"
#include "smc.h"
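
/*
 * Each numbered step in the flow above corresponds to one entry in the
 * table below: a 16-bit POST code used for progress reporting, the set
 * of boot paths (cold, fast, warm, S3) on which the step runs, and the
 * function that implements it. mrc_mem_init() walks this table in order
 * and skips entries whose boot_path mask does not include the current
 * boot mode.
 */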
static const struct mem_init init[] = {
	{ 0x0101, BM_COLD | BM_FAST | BM_WARM | BM_S3, clear_self_refresh },
	{ 0x0200, BM_COLD | BM_FAST | BM_WARM | BM_S3, prog_ddr_timing_control },
	{ 0x0103, BM_COLD | BM_FAST                  , prog_decode_before_jedec },
	{ 0x0104, BM_COLD | BM_FAST                  , perform_ddr_reset },
	{ 0x0300, BM_COLD | BM_FAST           | BM_S3, ddrphy_init },
	{ 0x0400, BM_COLD | BM_FAST                  , perform_jedec_init },
	{ 0x0105, BM_COLD | BM_FAST                  , set_ddr_init_complete },
	{ 0x0106,           BM_FAST | BM_WARM | BM_S3, restore_timings },
	{ 0x0106, BM_COLD                            , default_timings },
	{ 0x0500, BM_COLD                            , rcvn_cal },
	{ 0x0600, BM_COLD                            , wr_level },
	{ 0x0120, BM_COLD                            , prog_page_ctrl },
	{ 0x0700, BM_COLD                            , rd_train },
	{ 0x0800, BM_COLD                            , wr_train },
	{ 0x010b, BM_COLD                            , store_timings },
	{ 0x010c, BM_COLD | BM_FAST | BM_WARM | BM_S3, enable_scrambling },
	{ 0x010d, BM_COLD | BM_FAST | BM_WARM | BM_S3, prog_ddr_control },
	{ 0x010e, BM_COLD | BM_FAST | BM_WARM | BM_S3, prog_dra_drb },
	{ 0x010f,                     BM_WARM | BM_S3, perform_wake },
	{ 0x0110, BM_COLD | BM_FAST | BM_WARM | BM_S3, change_refresh_period },
	{ 0x0111, BM_COLD | BM_FAST | BM_WARM | BM_S3, set_auto_refresh },
	{ 0x0112, BM_COLD | BM_FAST | BM_WARM | BM_S3, ecc_enable },
	{ 0x0113, BM_COLD | BM_FAST                  , memory_test },
	{ 0x0114, BM_COLD | BM_FAST | BM_WARM | BM_S3, lock_registers }
};

/* Adjust configuration parameters before initialization sequence */
static void mrc_adjust_params(struct mrc_params *mrc_params)
{
	const struct dram_params *dram_params;
	uint8_t dram_width;
	uint32_t rank_enables;
	uint32_t channel_width;

	ENTERFN();

	/* initially expect success */
	mrc_params->status = MRC_SUCCESS;

	dram_width = mrc_params->dram_width;
	rank_enables = mrc_params->rank_enables;
	channel_width = mrc_params->channel_width;

	/*
	 * Setup board layout (must be reviewed as is selecting static timings)
	 * 0 == R0 (DDR3 x16), 1 == R1 (DDR3 x16),
	 * 2 == DV (DDR3 x8), 3 == SV (DDR3 x8).
	 */
	if (dram_width == X8)
		mrc_params->board_id = 2;	/* select x8 layout */
	else
		mrc_params->board_id = 0;	/* select x16 layout */

	/* initially no memory */
	mrc_params->mem_size = 0;

	/* begin of channel settings */
	dram_params = &mrc_params->params;

	/*
	 * Determine column bits:
	 *
	 * Column: 11 for 8Gbx8, else 10
	 */
	mrc_params->column_bits[0] =
		(dram_params[0].density == 4) &&
		(dram_width == X8) ? 11 : 10;

	/*
	 * Determine row bits:
	 *
	 * 512Mbx16=12 512Mbx8=13
	 * 1Gbx16=13   1Gbx8=14
	 * 2Gbx16=14   2Gbx8=15
	 * 4Gbx16=15   4Gbx8=16
	 * 8Gbx16=16   8Gbx8=16
	 */
	mrc_params->row_bits[0] = 12 + dram_params[0].density +
		((dram_params[0].density < 4) &&
		 (dram_width == X8) ? 1 : 0);
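	/*
	 * Worked example from the table above: a 2Gb (density == 2) x8
	 * part gets 12 + 2 + 1 = 15 row bits, while an 8Gb (density == 4)
	 * x8 part gets 12 + 4 + 0 = 16.
	 */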

	/*
	 * Determine per-channel memory size:
	 *
	 * (For 2 RANKs, multiply by 2)
	 * (For 16 bit data bus, divide by 2)
	 *
	 * DENSITY  WIDTH  MEM_AVAILABLE
	 * 512Mb    x16    0x008000000 ( 128MB)
	 * 512Mb    x8     0x010000000 ( 256MB)
	 * 1Gb      x16    0x010000000 ( 256MB)
	 * 1Gb      x8     0x020000000 ( 512MB)
	 * 2Gb      x16    0x020000000 ( 512MB)
	 * 2Gb      x8     0x040000000 (1024MB)
	 * 4Gb      x16    0x040000000 (1024MB)
	 * 4Gb      x8     0x080000000 (2048MB)
	 */
	mrc_params->channel_size[0] = 1 << dram_params[0].density;
	mrc_params->channel_size[0] *= (dram_width == X8) ? 2 : 1;
	mrc_params->channel_size[0] *= (rank_enables == 0x3) ? 2 : 1;
	mrc_params->channel_size[0] *= (channel_width == X16) ? 1 : 2;

	/* Determine memory size (convert number of 64MB/512Mb units) */
	mrc_params->mem_size += mrc_params->channel_size[0] << 26;
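	/*
	 * Worked example: a single-rank 1Gb (density == 1) x8 part on an
	 * X16 channel gives channel_size = (1 << 1) * 2 * 1 * 1 = 4 units,
	 * so mem_size grows by 4 << 26 = 0x10000000 bytes (256MB), i.e.
	 * half the 512MB listed above, per the "divide by 2 for a 16 bit
	 * data bus" note.
	 */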

	LEAVEFN();
}

static void mrc_mem_init(struct mrc_params *mrc_params)
{
	int i;

	ENTERFN();

	/* MRC started */
	mrc_post_code(0x01, 0x00);

	if (mrc_params->boot_mode != BM_COLD) {
		if (mrc_params->ddr_speed != mrc_params->timings.ddr_speed) {
			/* full training required as frequency changed */
			mrc_params->boot_mode = BM_COLD;
		}
	}

	for (i = 0; i < ARRAY_SIZE(init); i++) {
		uint64_t my_tsc;

		if (mrc_params->boot_mode & init[i].boot_path) {
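			/*
			 * The entry's 16-bit post_code is reported as a
			 * major/minor pair: high byte first, low byte second
			 * (e.g. 0x0300 becomes major 0x03, minor 0x00).
			 */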
			uint8_t major = init[i].post_code >> 8 & 0xff;
			uint8_t minor = init[i].post_code >> 0 & 0xff;
			mrc_post_code(major, minor);

			my_tsc = rdtsc();
			init[i].init_fn(mrc_params);
			DPF(D_TIME, "Execution time %llx", rdtsc() - my_tsc);
		}
	}

	/* display the timings */
	print_timings(mrc_params);

	/* MRC complete */
	mrc_post_code(0x01, 0xff);

	LEAVEFN();
}

void mrc_init(struct mrc_params *mrc_params)
{
	ENTERFN();

	DPF(D_INFO, "MRC Version %04x %s %s\n", MRC_VERSION,
	    __DATE__, __TIME__);

	/* Set up the data structures used by mrc_mem_init() */
	mrc_adjust_params(mrc_params);

	/* Initialize system memory */
	mrc_mem_init(mrc_params);

	LEAVEFN();
}