
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
 * Copyright 2017-2018 NXP Semiconductor
 */
#include <common.h>
#include <hwconfig.h>
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
	defined(CONFIG_ARM)
#include <asm/arch/clock.h>
#endif
/*
 * Use our own stack-based buffer before relocation to allow accessing longer
 * hwconfig strings that might be in the environment before we've relocated.
 * This is pretty fragile on both the use of stack and if the buffer is big
 * enough. However, we will get a warning from env_get_f() for the latter.
 */
/* Board-specific functions defined in each board's ddr.c */
extern void fsl_ddr_board_options(memctl_options_t *popts,
				  dimm_params_t *pdimm,
				  unsigned int ctrl_num);
struct dynamic_odt {
	unsigned int odt_rd_cfg;
	unsigned int odt_wr_cfg;
	unsigned int odt_rtt_norm;
	unsigned int odt_rtt_wr;
};
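/*
 * Note on the table names below (naming inferred from how the tables are
 * chosen in populate_memctl_options(), not stated in the original source):
 * single_S/D/Q describe one populated slot holding a single-, dual- or
 * quad-rank module; dual_XY describes two slots, where X is the module in
 * slot 0 and Y the module in slot 1 (S = single-rank, D = dual-rank,
 * 0 = empty slot).
 */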
#ifdef CONFIG_SYS_FSL_DDR4
/* Quad-rank is not verified yet due to availability.
 * Replacing 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option.
 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,	/* unverified */
		DDR4_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	}
};
static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};
static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	}
};
#elif defined(CONFIG_SYS_FSL_DDR3)
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	}
};
static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};
static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	}
};
#else	/* CONFIG_SYS_FSL_DDR3 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};
static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
#endif
/*
 * Automatically select the bank interleaving mode based on the DIMMs,
 * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 */
static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
{
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
	else if (pdimm[0].n_ranks == 2)
		return FSL_DDR_CS0_CS1;
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
#endif
	if (pdimm[0].n_ranks == 2) {
		if (pdimm[1].n_ranks == 2)
			return FSL_DDR_CS0_CS1_CS2_CS3;
		else
			return FSL_DDR_CS0_CS1;
	}
#endif
	return 0;
}
unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
				     memctl_options_t *popts,
				     dimm_params_t *pdimm,
				     unsigned int ctrl_num)
{
	unsigned int i;
	char buf[HWCONFIG_BUFFER_SIZE];
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	const struct dynamic_odt *pdodt = odt_unknown;
#endif
#if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
	ulong ddr_freq;
#endif
	/*
	 * Extract hwconfig from the environment since we have not properly
	 * set up the environment yet, but we need it for the DDR config
	 * parameters.
	 */
	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
		buf[0] = '\0';
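	/*
	 * Illustrative only: a hwconfig value assembled from the fsl_ddr
	 * sub-options parsed later in this function (assumed example, not a
	 * recommendation for any particular board):
	 *   hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=auto,ecc=on
	 */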
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	/* Chip select options. */
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	switch (pdimm[0].n_ranks) {
	case 1:
		pdodt = single_S;
		break;
	case 2:
		pdodt = single_D;
		break;
	case 4:
		pdodt = single_Q;
		break;
	}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
	switch (pdimm[0].n_ranks) {
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	case 4:
		pdodt = single_Q;
		if (pdimm[1].n_ranks)
			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
		break;
#endif
	case 2:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_DD;
			break;
		case 1:
			pdodt = dual_DS;
			break;
		case 0:
			pdodt = dual_D0;
			break;
		}
		break;
	case 1:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_SD;
			break;
		case 1:
			pdodt = dual_SS;
			break;
		case 0:
			pdodt = dual_S0;
			break;
		}
		break;
	case 0:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_0D;
			break;
		case 1:
			pdodt = dual_0S;
			break;
		}
		break;
	}
#endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
#endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */
	/* Pick chip-select local options. */
	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
#else
		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
#endif
		popts->cs_local_opts[i].auto_precharge = 0;
	}
	/* Pick interleaving mode. */
	/*
	 * 0 = no interleaving
	 * 1 = interleaving between 2 controllers
	 */
	popts->memctl_interleaving = 0;
	/*
	 * 0 = cacheline
	 * 1 = page
	 * 2 = (logical) bank
	 * 3 = superbank (only if CS interleaving is enabled)
	 */
	popts->memctl_interleaving_mode = 0;
	/*
	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
	 * 1: page: bit to the left of the column bits selects the memctl
	 * 2: bank: bit to the left of the bank bits selects the memctl
	 * 3: superbank: bit to the left of the chip select selects the memctl
	 *
	 * NOTE: ba_intlv (rank interleaving) is independent of memory
	 * controller interleaving; it is only within a memory controller.
	 * Must use superbank interleaving if rank interleaving is used and
	 * memory controller interleaving is enabled.
	 */
	/*
	 * 0 = no
	 * 0x40 = CS0,CS1
	 * 0x20 = CS2,CS3
	 * 0x60 = CS0,CS1 + CS2,CS3
	 * 0x04 = CS0,CS1,CS2,CS3
	 */
	popts->ba_intlv_ctl = 0;
	/* Memory Organization Parameters */
	popts->registered_dimm_en = common_dimm->all_dimms_registered;
	/* Operational Mode Parameters */
	/* Pick ECC modes */
	popts->ecc_mode = 0;	/* 0 = disabled, 1 = enabled */
#ifdef CONFIG_DDR_ECC
	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
			popts->ecc_mode = 1;
	} else
		popts->ecc_mode = 1;
#endif
	/* 1 = use memory controller to init data */
	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->dqs_config = 0;
#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
	popts->dqs_config = 1;
#endif
	/* Choose self-refresh during sleep. */
	popts->self_refresh_in_sleep = 1;
	/* Choose dynamic power management mode. */
	popts->dynamic_power = 0;
	/*
	 * check first dimm for primary sdram width
	 * presuming all dimms are similar
	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	if (pdimm[0].n_ranks != 0) {
		if ((pdimm[0].data_width >= 64) && \
			(pdimm[0].data_width <= 72))
			popts->data_bus_width = 0;
		else if ((pdimm[0].data_width >= 32) && \
			(pdimm[0].data_width <= 40))
			popts->data_bus_width = 1;
		else {
			panic("Error: data width %u is invalid!\n",
			      pdimm[0].data_width);
		}
	}
#else
	if (pdimm[0].n_ranks != 0) {
		if (pdimm[0].primary_sdram_width == 64)
			popts->data_bus_width = 0;
		else if (pdimm[0].primary_sdram_width == 32)
			popts->data_bus_width = 1;
		else if (pdimm[0].primary_sdram_width == 16)
			popts->data_bus_width = 2;
		else {
			panic("Error: primary sdram width %u is invalid!\n",
			      pdimm[0].primary_sdram_width);
		}
	}
#endif
	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
	/* Choose burst length. */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
#if defined(CONFIG_E500MC)
	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
#else
	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
		/* 32-bit or 16-bit bus */
		popts->otf_burst_chop_en = 0;
		popts->burst_length = DDR_BL8;
	} else {
		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
	}
#endif
#else
	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
#endif
	/* Choose ddr controller address mirror mode */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
		if (pdimm[i].n_ranks) {
			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
			break;
		}
	}
#endif
	/* Global Timing Parameters. */
	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
	/* Pick a caslat override. */
	popts->cas_latency_override = 0;
	popts->cas_latency_override_value = 3;
	if (popts->cas_latency_override) {
		debug("using caslat override value = %u\n",
		      popts->cas_latency_override_value);
	}
	/* Decide whether to use the computed derated latency */
	popts->use_derated_caslat = 0;
	/* Choose an additive latency. */
	popts->additive_latency_override = 0;
	popts->additive_latency_override_value = 3;
	if (popts->additive_latency_override) {
		debug("using additive latency override value = %u\n",
		      popts->additive_latency_override_value);
	}
	/*
	 * 2T_EN setting
	 *
	 * Factors to consider for 2T_EN:
	 *	- number of DIMMs installed
	 *	- number of components, number of active ranks
	 *	- how much time you want to spend playing around
	 */
	popts->twot_en = 0;
	popts->threet_en = 0;
	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
	if (popts->registered_dimm_en)
		popts->ap_en = 1;	/* 0 = disable, 1 = enable */
	else
		popts->ap_en = 0;	/* disabled for DDR4 UDIMM/discrete default */
	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
			if (popts->registered_dimm_en ||
			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
				popts->ap_en = 1;
		}
	}
	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge.
	 * The value of 0x100 has been used for DDR1, DDR2 and DDR3; it is
	 * not wrong, and any value should work. The performance depends on
	 * the application, so there is no single value that is good for all
	 * workloads. One way to set it is to use 1/4 of the refresh
	 * interval (refint).
	 */
	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
			 >> 2;
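	/*
	 * Worked example (illustrative numbers, not from any particular
	 * board): with a 7.8 us refresh interval (refresh_rate_ps = 7800000)
	 * and a 1250 ps memory clock period (DDR-1600), picos_to_mclk()
	 * yields 6240 clocks, so bstopre ends up at 6240 / 4 = 1560 (0x618)
	 * memory clock cycles.
	 */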
	/*
	 * Window for four activates -- tFAW
	 *
	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
	 * FIXME: varies depending upon number of column addresses or data
	 *	  width, was considering looking at pdimm->primary_sdram_width
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
#elif defined(CONFIG_SYS_FSL_DDR2)
	/*
	 * x4/x8; some datasheets have 35000
	 * x16 wide columns only? Use 50000?
	 */
	popts->tfaw_window_four_activates_ps = 37500;
#else
	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
#endif
	popts->zq_en = 0;
	popts->wrlvl_en = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/*
	 * Because the DDR3 DIMM uses a fly-by topology, we suggest enabling
	 * write leveling to meet tQDSS under different loading.
	 */
	popts->wrlvl_en = 1;
	popts->zq_en = 1;
	popts->wrlvl_override = 0;
#endif
	/*
	 * Check the interleaving configuration from the environment.
	 * Please refer to doc/README.fsl-ddr for details.
	 *
	 * If memory controller interleaving is enabled, then the data
	 * bus widths must be programmed identically for all memory
	 * controllers.
	 *
	 * Attempt to set all controllers to the same chip-select
	 * interleaving mode. It will do a best effort to get the
	 * requested ranks interleaved together such that the result
	 * should be a subset of the requested configuration.
	 *
	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
	 * is mandatory.
	 */
#if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
		;
#else
		goto done;
#endif
	if (pdimm[0].n_ranks == 0) {
		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
		popts->memctl_interleaving = 0;
		goto done;
	}
	popts->memctl_interleaving = 1;
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
	popts->memctl_interleaving = 1;
	debug("256 Byte interleaving\n");
#else
	/*
	 * test null first. if CONFIG_HWCONFIG is not defined
	 * hwconfig_arg_cmp returns non-zero
	 */
	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
				  "null", buf)) {
		popts->memctl_interleaving = 0;
		debug("memory controller interleaving disabled.\n");
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "cacheline", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "page", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_PAGE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "bank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_BANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "superbank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
#if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_8KB_INTERLEAVING;
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_8KB_INTERLEAVING;
#endif
	} else {
		popts->memctl_interleaving = 0;
		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
	}
#endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
done:
#endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
	    (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/* test null first. if CONFIG_HWCONFIG is not defined,
		 * hwconfig_subarg_cmp_f returns non-zero */
		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					  "null", buf))
			debug("bank interleaving disabled.\n");
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_and_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "auto", buf))
			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
		else
			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
			if (pdimm[0].n_ranks == 4)
				break;
#endif
			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			if (pdimm[0].capacity != pdimm[1].capacity) {
				popts->ba_intlv_ctl = 0;
				printf("Not identical DIMM size for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1:
			if (pdimm[0].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			break;
		case FSL_DDR_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if (pdimm[1].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		default:
			popts->ba_intlv_ctl = 0;
			break;
		}
	}
	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
			popts->addr_hash = 0;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
					       "true", buf))
			popts->addr_hash = 1;
	}
	if (pdimm[0].n_ranks == 4)
		popts->quad_rank_present = 1;
	popts->package_3ds = pdimm->package_3ds;
#if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
	if (popts->registered_dimm_en) {
		popts->rcw_override = 1;
		popts->rcw_1 = 0x000a5a00;
		if (ddr_freq <= 800)
			popts->rcw_2 = 0x00000000;
		else if (ddr_freq <= 1066)
			popts->rcw_2 = 0x00100000;
		else if (ddr_freq <= 1333)
			popts->rcw_2 = 0x00200000;
		else
			popts->rcw_2 = 0x00300000;
	}
#endif
	fsl_ddr_board_options(popts, pdimm, ctrl_num);
	return 0;
}
void check_interleaving_options(fsl_ddr_info_t *pinfo)
{
	int i, j, k, check_n_ranks, intlv_invalid = 0;
	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
	unsigned long long check_rank_density;
	struct dimm_params_s *dimm;
	int first_ctrl = pinfo->first_ctrl;
	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
	/*
	 * Check if all controllers are configured for memory
	 * controller interleaving. Identical dimms are recommended. At least
	 * the size, row and col address should be checked.
	 */
	j = 0;
	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
	check_n_row_addr = pinfo->dimm_params[first_ctrl][0].n_row_addr;
	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
	for (i = first_ctrl; i <= last_ctrl; i++) {
		dimm = &pinfo->dimm_params[i][0];
		if (!pinfo->memctl_opts[i].memctl_interleaving) {
			continue;
		} else if (((check_rank_density != dimm->rank_density) ||
			    (check_n_ranks != dimm->n_ranks) ||
			    (check_n_row_addr != dimm->n_row_addr) ||
			    (check_n_col_addr != dimm->n_col_addr) ||
			    (check_intlv !=
			     pinfo->memctl_opts[i].memctl_interleaving_mode))) {
			intlv_invalid = 1;
			break;
		} else {
			j++;
		}
	}
	if (intlv_invalid) {
		for (i = first_ctrl; i <= last_ctrl; i++)
			pinfo->memctl_opts[i].memctl_interleaving = 0;
		printf("Not all DIMMs are identical. "
			"Memory controller interleaving disabled.\n");
	} else {
		switch (check_intlv) {
		case FSL_DDR_256B_INTERLEAVING:
		case FSL_DDR_CACHE_LINE_INTERLEAVING:
		case FSL_DDR_PAGE_INTERLEAVING:
		case FSL_DDR_BANK_INTERLEAVING:
		case FSL_DDR_SUPERBANK_INTERLEAVING:
#if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
			k = 2;
#else
			k = CONFIG_SYS_NUM_DDR_CTLRS;
#endif
			break;
		case FSL_DDR_3WAY_1KB_INTERLEAVING:
		case FSL_DDR_3WAY_4KB_INTERLEAVING:
		case FSL_DDR_3WAY_8KB_INTERLEAVING:
		case FSL_DDR_4WAY_1KB_INTERLEAVING:
		case FSL_DDR_4WAY_4KB_INTERLEAVING:
		case FSL_DDR_4WAY_8KB_INTERLEAVING:
		default:
			k = CONFIG_SYS_NUM_DDR_CTLRS;
			break;
		}
		debug("%d of %d controllers are interleaving.\n", j, k);
		if (j && (j != k)) {
			for (i = first_ctrl; i <= last_ctrl; i++)
				pinfo->memctl_opts[i].memctl_interleaving = 0;
			if ((last_ctrl - first_ctrl) > 1)
				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
		}
	}
	debug("Checking interleaving options completed\n");
}
int fsl_use_spd(void)
{
	int use_spd = 0;
#ifdef CONFIG_DDR_SPD
	char buf[HWCONFIG_BUFFER_SIZE];
	/*
	 * Extract hwconfig from the environment since we have not properly
	 * set up the environment yet, but we need it for the DDR config
	 * parameters.
	 */
	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
		buf[0] = '\0';
	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
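	/*
	 * Illustrative only (assumed example value): selecting the fixed,
	 * board-specific timing tables instead of SPD can be requested with
	 * something like:
	 *   hwconfig=fsl_ddr:sdram=fixed
	 */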
	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
			use_spd = 1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
					       "fixed", buf))
			use_spd = 0;
		else
			use_spd = 1;
	} else
		use_spd = 1;
#endif
	return use_spd;
}