ddr2_dimm_params.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2008 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>

/*
 * Calculate the Density of each Physical Rank.
 * Returned size is in bytes.
 *
 * Study this table from Byte 31 of the JEDEC SPD Spec.
 *
 *          DDR I    DDR II
 *   Bit    Size     Size
 *   ---    -----    ------
 *   7 high 512MB    512MB
 *   6      256MB    256MB
 *   5      128MB    128MB
 *   4       64MB     16GB
 *   3       32MB      8GB
 *   2       16MB      4GB
 *   1        2GB      2GB
 *   0 low    1GB      1GB
 *
 * Reorder the table to be linear by stripping the bottom
 * 2 or 5 bits off and shifting them up to the top.
 */
static unsigned long long
compute_ranksize(unsigned int mem_type, unsigned char row_dens)
{
        unsigned long long bsize;

        /* Bottom 5 bits up to the top. */
        bsize = ((row_dens >> 5) | ((row_dens & 31) << 3));
        bsize <<= 27ULL;
        debug("DDR: DDR II rank density = 0x%16llx\n", bsize);

        return bsize;
}

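/*
 * Worked example of the bit reordering above (illustrative values only):
 *   row_dens = 0x01 (bit 0, 1GB):   (0x01 & 31) << 3 = 8, 8 << 27 = 1 GiB
 *   row_dens = 0x80 (bit 7, 512MB): 0x80 >> 5 = 4,        4 << 27 = 512 MiB
 */
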
/*
 * Convert a two-nibble BCD value into a cycle time.
 * While the spec calls for nanoseconds, picoseconds are returned.
 *
 * This implements the tables for bytes 9, 23 and 25 for both
 * DDR I and II.  No allowance for distinguishing the invalid
 * fields absent for DDR I yet present in DDR II is made.
 * (That is, cycle times of .25, .33, .66 and .75 ns are
 * allowed for both DDR II and I.)
 */
static unsigned int
convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
{
        /* Table look up the lower nibble, allow DDR I & II. */
        unsigned int tenths_ps[16] = {
                0,
                100,
                200,
                300,
                400,
                500,
                600,
                700,
                800,
                900,
                250,    /* This and the next 3 entries valid ... */
                330,    /* ... only for tCK calculations. */
                660,
                750,
                0,      /* undefined */
                0       /* undefined */
        };
        unsigned int whole_ns = (spd_val & 0xF0) >> 4;
        unsigned int tenth_ns = spd_val & 0x0F;
        unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];

        return ps;
}

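/*
 * Worked example (illustrative): an SPD byte 9 value of 0x3D decodes as
 * whole_ns = 3 and tenth_ns = 0xD (0.75 ns), giving 3000 + 750 = 3750 ps,
 * i.e. the 3.75 ns tCKmin of a DDR2-533 part.
 */
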
static unsigned int
convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
{
        unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
        unsigned int hundredth_ns = spd_val & 0x0F;
        unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;

        return ps;
}

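/*
 * Worked example (illustrative): a setup/hold byte of 0x17 decodes as
 * 1 * 100 + 7 * 10 = 170 ps, i.e. 0.17 ns.
 */
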
static unsigned int byte40_table_ps[8] = {
        0,
        250,
        330,
        500,
        660,
        750,
        0,      /* supposed to be RFC, but not sure what that means */
        0       /* Undefined */
};

static unsigned int
compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
{
        return (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
                + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
}

static unsigned int
compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
{
        return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
}

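/*
 * Worked example (illustrative): a 1 Gbit DDR2 device with tRFC = 127.5 ns
 * is typically encoded as trfc = 127 with a fractional-extension code of 3
 * (0.5 ns), i.e. trctrfc_ext = 0x06.  compute_trfc_ps_from_spd() then yields
 * (0 * 256 + 127) * 1000 + byte40_table_ps[3] = 127000 + 500 = 127500 ps.
 */
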
/*
 * Determine Refresh Rate.  Ignore self refresh bit on DDR I.
 * Table from SPD Spec, Byte 12, converted to picoseconds and
 * filled in with "default" normal values.
 */
static unsigned int
determine_refresh_rate_ps(const unsigned int spd_refresh)
{
        unsigned int refresh_time_ps[8] = {
                15625000,       /* 0 Normal    1.00x */
                3900000,        /* 1 Reduced    .25x */
                7800000,        /* 2 Extended   .50x */
                31300000,       /* 3 Extended  2.00x */
                62500000,       /* 4 Extended  4.00x */
                125000000,      /* 5 Extended  8.00x */
                15625000,       /* 6 Normal    1.00x  filler */
                15625000,       /* 7 Normal    1.00x  filler */
        };

        return refresh_time_ps[spd_refresh & 0x7];
}

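/*
 * Worked example (illustrative): DDR2 SPD byte 12 is commonly 0x82
 * (self-refresh capable, rate code 2); masking with 0x7 selects entry 2,
 * i.e. a 7.8 us (7800000 ps) refresh interval.
 */
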
/*
 * The purpose of this function is to compute a suitable
 * CAS latency given the DRAM clock period.  The SPD only
 * defines at most 3 CAS latencies.  Typically, the slower
 * the DIMM runs, the shorter its CAS latency can be.
 * If the DIMM is operating at a sufficiently low frequency,
 * it may be able to run at a CAS latency shorter than the
 * shortest SPD-defined CAS latency.
 *
 * If a CAS latency is not found, 0 is returned.
 *
 * Do this by finding in the standard speed bin table the longest
 * tCKmin that doesn't exceed the value of mclk_ps (tCK).
 *
 * An assumption made is that the SDRAM device allows the
 * CL to be programmed for a value that is lower than those
 * advertised by the SPD.  This is not always the case,
 * as those modes not defined in the SPD are optional.
 *
 * CAS latency de-rating based upon values from JEDEC Standard No. 79-2C
 * Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
 * and tRC for corresponding bin"
 *
 * ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
 * Not certain if any good value exists for CL=2
 */
/*                                  CL2   CL3   CL4   CL5   CL6   CL7 */
unsigned short ddr2_speed_bins[] = {  0, 5000, 3750, 3000, 2500, 1875 };

unsigned int
compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
{
        const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
        unsigned int lowest_tCKmin_found = 0;
        unsigned int lowest_tCKmin_CL = 0;
        unsigned int i;

        debug("mclk_ps = %u\n", mclk_ps);

        for (i = 0; i < num_speed_bins; i++) {
                unsigned int x = ddr2_speed_bins[i];
                debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
                      i, x, lowest_tCKmin_found);
                if (x && x <= mclk_ps && x >= lowest_tCKmin_found) {
                        lowest_tCKmin_found = x;
                        lowest_tCKmin_CL = i + 2;
                }
        }

        debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);

        return lowest_tCKmin_CL;
}

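/*
 * Worked example (illustrative): with mclk_ps = 3750 (a 266 MHz DDR2-533
 * clock), the largest speed-bin tCKmin not exceeding 3750 ps is 3750 itself
 * at index 2, so the function returns CL = 2 + 2 = 4.
 */
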
/*
 * ddr_compute_dimm_parameters for DDR2 SPD
 *
 * Compute DIMM parameters based upon the SPD information in spd.
 * Writes the results to the dimm_params_t structure pointed to by pdimm.
 *
 * FIXME: use #define for the retvals
 */
unsigned int ddr_compute_dimm_parameters(const unsigned int ctrl_num,
                                         const ddr2_spd_eeprom_t *spd,
                                         dimm_params_t *pdimm,
                                         unsigned int dimm_number)
{
        unsigned int retval;

        if (spd->mem_type) {
                if (spd->mem_type != SPD_MEMTYPE_DDR2) {
                        printf("DIMM %u: is not a DDR2 SPD.\n", dimm_number);
                        return 1;
                }
        } else {
                memset(pdimm, 0, sizeof(dimm_params_t));
                return 1;
        }

        retval = ddr2_spd_check(spd);
        if (retval) {
                printf("DIMM %u: failed checksum\n", dimm_number);
                return 2;
        }

        /*
         * The part name in ASCII in the SPD EEPROM is not null terminated.
         * Guarantee null termination here by presetting all bytes to 0
         * and copying the part name in ASCII from the SPD onto it.
         */
        memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
        memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);

        /* DIMM organization parameters */
        pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
        pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
        pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
        pdimm->data_width = spd->dataw;
        pdimm->primary_sdram_width = spd->primw;
        pdimm->ec_sdram_width = spd->ecw;

        /* These are all the types defined by the JEDEC DDR2 SPD 1.3 spec */
        switch (spd->dimm_type) {
        case DDR2_SPD_DIMMTYPE_RDIMM:
        case DDR2_SPD_DIMMTYPE_72B_SO_RDIMM:
        case DDR2_SPD_DIMMTYPE_MINI_RDIMM:
                /* Registered/buffered DIMMs */
                pdimm->registered_dimm = 1;
                break;

        case DDR2_SPD_DIMMTYPE_UDIMM:
        case DDR2_SPD_DIMMTYPE_SO_DIMM:
        case DDR2_SPD_DIMMTYPE_MICRO_DIMM:
        case DDR2_SPD_DIMMTYPE_MINI_UDIMM:
                /* Unbuffered DIMMs */
                pdimm->registered_dimm = 0;
                break;

        case DDR2_SPD_DIMMTYPE_72B_SO_CDIMM:
        default:
                printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
                return 1;
        }

        /* SDRAM device parameters */
        pdimm->n_row_addr = spd->nrow_addr;
        pdimm->n_col_addr = spd->ncol_addr;
        pdimm->n_banks_per_sdram_device = spd->nbanks;
        pdimm->edc_config = spd->config;
        pdimm->burst_lengths_bitmask = spd->burstl;

        /*
         * Calculate the Maximum Data Rate based on the Minimum Cycle time.
         * The SPD clk_cycle field (tCKmin) is measured in tenths of
         * nanoseconds and represented as BCD.
         */
        pdimm->tckmin_x_ps
                = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
        pdimm->tckmin_x_minus_1_ps
                = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
        pdimm->tckmin_x_minus_2_ps
                = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
        pdimm->tckmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax);

        /*
         * Compute CAS latencies defined by SPD.
         * The SPD cas_lat byte should have at least 1 and at most 3 bits set.
         *
         * If cas_lat after masking is 0, the __ilog2 function returns
         * 255 into the variable.  This behavior is abused once.
         */
        pdimm->caslat_x = __ilog2(spd->cas_lat);
        pdimm->caslat_x_minus_1 = __ilog2(spd->cas_lat
                                          & ~(1 << pdimm->caslat_x));
        pdimm->caslat_x_minus_2 = __ilog2(spd->cas_lat
                                          & ~(1 << pdimm->caslat_x)
                                          & ~(1 << pdimm->caslat_x_minus_1));

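        /*
         * Worked example (illustrative): a cas_lat byte of 0x38 (bits 3, 4
         * and 5 set, i.e. CL 3/4/5 supported) yields caslat_x = 5,
         * caslat_x_minus_1 = 4 and caslat_x_minus_2 = 3.
         */
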
        /* Compute CAS latencies below that defined by SPD */
        pdimm->caslat_lowest_derated = compute_derated_DDR2_CAS_latency(
                                        get_memory_clk_period_ps(ctrl_num));

        /* Compute timing parameters; these SPD fields are in 0.25 ns units */
        pdimm->trcd_ps = spd->trcd * 250;
        pdimm->trp_ps = spd->trp * 250;
        pdimm->tras_ps = spd->tras * 1000;      /* SPD tRAS is in 1 ns units */

        pdimm->twr_ps = spd->twr * 250;
        pdimm->twtr_ps = spd->twtr * 250;
        pdimm->trfc_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc);
        pdimm->trrd_ps = spd->trrd * 250;
        pdimm->trc_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc);
        pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);

        pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
        pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
        pdimm->tds_ps
                = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
        pdimm->tdh_ps
                = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);

        pdimm->trtp_ps = spd->trtp * 250;
        pdimm->tdqsq_max_ps = spd->tdqsq * 10;  /* SPD unit is 0.01 ns */
        pdimm->tqhs_ps = spd->tqhs * 10;        /* SPD unit is 0.01 ns */

        return 0;
}