options.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
  4. * Copyright 2017-2018 NXP Semiconductor
  5. */
  6. #include <common.h>
  7. #include <hwconfig.h>
  8. #include <fsl_ddr_sdram.h>
  9. #include <fsl_ddr.h>
  10. #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
  11. defined(CONFIG_ARM)
  12. #include <asm/arch/clock.h>
  13. #endif
  14. /*
  15. * Use our own stack-based buffer before relocation to allow accessing longer
  16. * hwconfig strings that might be in the environment before we've relocated.
  17. * This is pretty fragile, both in its use of the stack and in whether the
  18. * buffer is big enough; however, env_get_f() will warn us about the latter.
  19. */
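/*
 * Illustrative sketch (editorial, not part of the driver): the pre-relocation
 * hwconfig access pattern used throughout this file. HWCONFIG_BUFFER_SIZE and
 * the env_get_f()/hwconfig_*_f() helpers come from the headers included above;
 * the "fsl_ddr:ecc=on" value is only an assumed example setting.
 */
#if 0
static int example_read_hwconfig_early(void)
{
	char buffer[HWCONFIG_BUFFER_SIZE];	/* stack-based, pre-relocation */
	char *buf = NULL;

	/* env_get_f() copies the raw environment variable into our buffer */
	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	/* non-zero if the environment contains e.g. "hwconfig=fsl_ddr:ecc=on" */
	return hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf);
}
#endif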
  20. /* Board-specific functions defined in each board's ddr.c */
  21. extern void fsl_ddr_board_options(memctl_options_t *popts,
  22. dimm_params_t *pdimm,
  23. unsigned int ctrl_num);
  24. struct dynamic_odt {
  25. unsigned int odt_rd_cfg;
  26. unsigned int odt_wr_cfg;
  27. unsigned int odt_rtt_norm;
  28. unsigned int odt_rtt_wr;
  29. };
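/*
 * Editorial note (inferred from how the tables are selected in
 * populate_memctl_options() below): single_/dual_ gives the number of DIMM
 * slots per controller, and the suffix describes each slot -- S = single
 * rank, D = dual rank, Q = quad rank, 0 = empty. For example, dual_DS is a
 * two-slot controller with a dual-rank DIMM in slot 0 and a single-rank
 * DIMM in slot 1; each table holds one entry per chip select.
 */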
  30. #ifdef CONFIG_SYS_FSL_DDR4
  31. /* Quad rank is not verified yet due to limited availability.
  32. * 20 OHM is replaced with 34 OHM since DDR4 doesn't have a 20 OHM option.
  33. */
  34. static __maybe_unused const struct dynamic_odt single_Q[4] = {
  35. { /* cs0 */
  36. FSL_DDR_ODT_NEVER,
  37. FSL_DDR_ODT_CS_AND_OTHER_DIMM,
  38. DDR4_RTT_34_OHM, /* unverified */
  39. DDR4_RTT_120_OHM
  40. },
  41. { /* cs1 */
  42. FSL_DDR_ODT_NEVER,
  43. FSL_DDR_ODT_NEVER,
  44. DDR4_RTT_OFF,
  45. DDR4_RTT_120_OHM
  46. },
  47. { /* cs2 */
  48. FSL_DDR_ODT_NEVER,
  49. FSL_DDR_ODT_CS_AND_OTHER_DIMM,
  50. DDR4_RTT_34_OHM,
  51. DDR4_RTT_120_OHM
  52. },
  53. { /* cs3 */
  54. FSL_DDR_ODT_NEVER,
  55. FSL_DDR_ODT_NEVER, /* tied high */
  56. DDR4_RTT_OFF,
  57. DDR4_RTT_120_OHM
  58. }
  59. };
  60. static __maybe_unused const struct dynamic_odt single_D[4] = {
  61. { /* cs0 */
  62. FSL_DDR_ODT_NEVER,
  63. FSL_DDR_ODT_ALL,
  64. DDR4_RTT_40_OHM,
  65. DDR4_RTT_OFF
  66. },
  67. { /* cs1 */
  68. FSL_DDR_ODT_NEVER,
  69. FSL_DDR_ODT_NEVER,
  70. DDR4_RTT_OFF,
  71. DDR4_RTT_OFF
  72. },
  73. {0, 0, 0, 0},
  74. {0, 0, 0, 0}
  75. };
  76. static __maybe_unused const struct dynamic_odt single_S[4] = {
  77. { /* cs0 */
  78. FSL_DDR_ODT_NEVER,
  79. FSL_DDR_ODT_ALL,
  80. DDR4_RTT_40_OHM,
  81. DDR4_RTT_OFF
  82. },
  83. {0, 0, 0, 0},
  84. {0, 0, 0, 0},
  85. {0, 0, 0, 0},
  86. };
  87. static __maybe_unused const struct dynamic_odt dual_DD[4] = {
  88. { /* cs0 */
  89. FSL_DDR_ODT_NEVER,
  90. FSL_DDR_ODT_SAME_DIMM,
  91. DDR4_RTT_120_OHM,
  92. DDR4_RTT_OFF
  93. },
  94. { /* cs1 */
  95. FSL_DDR_ODT_OTHER_DIMM,
  96. FSL_DDR_ODT_OTHER_DIMM,
  97. DDR4_RTT_34_OHM,
  98. DDR4_RTT_OFF
  99. },
  100. { /* cs2 */
  101. FSL_DDR_ODT_NEVER,
  102. FSL_DDR_ODT_SAME_DIMM,
  103. DDR4_RTT_120_OHM,
  104. DDR4_RTT_OFF
  105. },
  106. { /* cs3 */
  107. FSL_DDR_ODT_OTHER_DIMM,
  108. FSL_DDR_ODT_OTHER_DIMM,
  109. DDR4_RTT_34_OHM,
  110. DDR4_RTT_OFF
  111. }
  112. };
  113. static __maybe_unused const struct dynamic_odt dual_DS[4] = {
  114. { /* cs0 */
  115. FSL_DDR_ODT_NEVER,
  116. FSL_DDR_ODT_SAME_DIMM,
  117. DDR4_RTT_120_OHM,
  118. DDR4_RTT_OFF
  119. },
  120. { /* cs1 */
  121. FSL_DDR_ODT_OTHER_DIMM,
  122. FSL_DDR_ODT_OTHER_DIMM,
  123. DDR4_RTT_34_OHM,
  124. DDR4_RTT_OFF
  125. },
  126. { /* cs2 */
  127. FSL_DDR_ODT_OTHER_DIMM,
  128. FSL_DDR_ODT_ALL,
  129. DDR4_RTT_34_OHM,
  130. DDR4_RTT_120_OHM
  131. },
  132. {0, 0, 0, 0}
  133. };
  134. static __maybe_unused const struct dynamic_odt dual_SD[4] = {
  135. { /* cs0 */
  136. FSL_DDR_ODT_OTHER_DIMM,
  137. FSL_DDR_ODT_ALL,
  138. DDR4_RTT_34_OHM,
  139. DDR4_RTT_120_OHM
  140. },
  141. {0, 0, 0, 0},
  142. { /* cs2 */
  143. FSL_DDR_ODT_NEVER,
  144. FSL_DDR_ODT_SAME_DIMM,
  145. DDR4_RTT_120_OHM,
  146. DDR4_RTT_OFF
  147. },
  148. { /* cs3 */
  149. FSL_DDR_ODT_OTHER_DIMM,
  150. FSL_DDR_ODT_OTHER_DIMM,
  151. DDR4_RTT_34_OHM,
  152. DDR4_RTT_OFF
  153. }
  154. };
  155. static __maybe_unused const struct dynamic_odt dual_SS[4] = {
  156. { /* cs0 */
  157. FSL_DDR_ODT_OTHER_DIMM,
  158. FSL_DDR_ODT_ALL,
  159. DDR4_RTT_34_OHM,
  160. DDR4_RTT_120_OHM
  161. },
  162. {0, 0, 0, 0},
  163. { /* cs2 */
  164. FSL_DDR_ODT_OTHER_DIMM,
  165. FSL_DDR_ODT_ALL,
  166. DDR4_RTT_34_OHM,
  167. DDR4_RTT_120_OHM
  168. },
  169. {0, 0, 0, 0}
  170. };
  171. static __maybe_unused const struct dynamic_odt dual_D0[4] = {
  172. { /* cs0 */
  173. FSL_DDR_ODT_NEVER,
  174. FSL_DDR_ODT_SAME_DIMM,
  175. DDR4_RTT_40_OHM,
  176. DDR4_RTT_OFF
  177. },
  178. { /* cs1 */
  179. FSL_DDR_ODT_NEVER,
  180. FSL_DDR_ODT_NEVER,
  181. DDR4_RTT_OFF,
  182. DDR4_RTT_OFF
  183. },
  184. {0, 0, 0, 0},
  185. {0, 0, 0, 0}
  186. };
  187. static __maybe_unused const struct dynamic_odt dual_0D[4] = {
  188. {0, 0, 0, 0},
  189. {0, 0, 0, 0},
  190. { /* cs2 */
  191. FSL_DDR_ODT_NEVER,
  192. FSL_DDR_ODT_SAME_DIMM,
  193. DDR4_RTT_40_OHM,
  194. DDR4_RTT_OFF
  195. },
  196. { /* cs3 */
  197. FSL_DDR_ODT_NEVER,
  198. FSL_DDR_ODT_NEVER,
  199. DDR4_RTT_OFF,
  200. DDR4_RTT_OFF
  201. }
  202. };
  203. static __maybe_unused const struct dynamic_odt dual_S0[4] = {
  204. { /* cs0 */
  205. FSL_DDR_ODT_NEVER,
  206. FSL_DDR_ODT_CS,
  207. DDR4_RTT_40_OHM,
  208. DDR4_RTT_OFF
  209. },
  210. {0, 0, 0, 0},
  211. {0, 0, 0, 0},
  212. {0, 0, 0, 0}
  213. };
  214. static __maybe_unused const struct dynamic_odt dual_0S[4] = {
  215. {0, 0, 0, 0},
  216. {0, 0, 0, 0},
  217. { /* cs2 */
  218. FSL_DDR_ODT_NEVER,
  219. FSL_DDR_ODT_CS,
  220. DDR4_RTT_40_OHM,
  221. DDR4_RTT_OFF
  222. },
  223. {0, 0, 0, 0}
  224. };
  225. static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
  226. { /* cs0 */
  227. FSL_DDR_ODT_NEVER,
  228. FSL_DDR_ODT_CS,
  229. DDR4_RTT_120_OHM,
  230. DDR4_RTT_OFF
  231. },
  232. { /* cs1 */
  233. FSL_DDR_ODT_NEVER,
  234. FSL_DDR_ODT_CS,
  235. DDR4_RTT_120_OHM,
  236. DDR4_RTT_OFF
  237. },
  238. { /* cs2 */
  239. FSL_DDR_ODT_NEVER,
  240. FSL_DDR_ODT_CS,
  241. DDR4_RTT_120_OHM,
  242. DDR4_RTT_OFF
  243. },
  244. { /* cs3 */
  245. FSL_DDR_ODT_NEVER,
  246. FSL_DDR_ODT_CS,
  247. DDR4_RTT_120_OHM,
  248. DDR4_RTT_OFF
  249. }
  250. };
  251. #elif defined(CONFIG_SYS_FSL_DDR3)
  252. static __maybe_unused const struct dynamic_odt single_Q[4] = {
  253. { /* cs0 */
  254. FSL_DDR_ODT_NEVER,
  255. FSL_DDR_ODT_CS_AND_OTHER_DIMM,
  256. DDR3_RTT_20_OHM,
  257. DDR3_RTT_120_OHM
  258. },
  259. { /* cs1 */
  260. FSL_DDR_ODT_NEVER,
  261. FSL_DDR_ODT_NEVER, /* tied high */
  262. DDR3_RTT_OFF,
  263. DDR3_RTT_120_OHM
  264. },
  265. { /* cs2 */
  266. FSL_DDR_ODT_NEVER,
  267. FSL_DDR_ODT_CS_AND_OTHER_DIMM,
  268. DDR3_RTT_20_OHM,
  269. DDR3_RTT_120_OHM
  270. },
  271. { /* cs3 */
  272. FSL_DDR_ODT_NEVER,
  273. FSL_DDR_ODT_NEVER, /* tied high */
  274. DDR3_RTT_OFF,
  275. DDR3_RTT_120_OHM
  276. }
  277. };
  278. static __maybe_unused const struct dynamic_odt single_D[4] = {
  279. { /* cs0 */
  280. FSL_DDR_ODT_NEVER,
  281. FSL_DDR_ODT_ALL,
  282. DDR3_RTT_40_OHM,
  283. DDR3_RTT_OFF
  284. },
  285. { /* cs1 */
  286. FSL_DDR_ODT_NEVER,
  287. FSL_DDR_ODT_NEVER,
  288. DDR3_RTT_OFF,
  289. DDR3_RTT_OFF
  290. },
  291. {0, 0, 0, 0},
  292. {0, 0, 0, 0}
  293. };
  294. static __maybe_unused const struct dynamic_odt single_S[4] = {
  295. { /* cs0 */
  296. FSL_DDR_ODT_NEVER,
  297. FSL_DDR_ODT_ALL,
  298. DDR3_RTT_40_OHM,
  299. DDR3_RTT_OFF
  300. },
  301. {0, 0, 0, 0},
  302. {0, 0, 0, 0},
  303. {0, 0, 0, 0},
  304. };
  305. static __maybe_unused const struct dynamic_odt dual_DD[4] = {
  306. { /* cs0 */
  307. FSL_DDR_ODT_NEVER,
  308. FSL_DDR_ODT_SAME_DIMM,
  309. DDR3_RTT_120_OHM,
  310. DDR3_RTT_OFF
  311. },
  312. { /* cs1 */
  313. FSL_DDR_ODT_OTHER_DIMM,
  314. FSL_DDR_ODT_OTHER_DIMM,
  315. DDR3_RTT_30_OHM,
  316. DDR3_RTT_OFF
  317. },
  318. { /* cs2 */
  319. FSL_DDR_ODT_NEVER,
  320. FSL_DDR_ODT_SAME_DIMM,
  321. DDR3_RTT_120_OHM,
  322. DDR3_RTT_OFF
  323. },
  324. { /* cs3 */
  325. FSL_DDR_ODT_OTHER_DIMM,
  326. FSL_DDR_ODT_OTHER_DIMM,
  327. DDR3_RTT_30_OHM,
  328. DDR3_RTT_OFF
  329. }
  330. };
  331. static __maybe_unused const struct dynamic_odt dual_DS[4] = {
  332. { /* cs0 */
  333. FSL_DDR_ODT_NEVER,
  334. FSL_DDR_ODT_SAME_DIMM,
  335. DDR3_RTT_120_OHM,
  336. DDR3_RTT_OFF
  337. },
  338. { /* cs1 */
  339. FSL_DDR_ODT_OTHER_DIMM,
  340. FSL_DDR_ODT_OTHER_DIMM,
  341. DDR3_RTT_30_OHM,
  342. DDR3_RTT_OFF
  343. },
  344. { /* cs2 */
  345. FSL_DDR_ODT_OTHER_DIMM,
  346. FSL_DDR_ODT_ALL,
  347. DDR3_RTT_20_OHM,
  348. DDR3_RTT_120_OHM
  349. },
  350. {0, 0, 0, 0}
  351. };
  352. static __maybe_unused const struct dynamic_odt dual_SD[4] = {
  353. { /* cs0 */
  354. FSL_DDR_ODT_OTHER_DIMM,
  355. FSL_DDR_ODT_ALL,
  356. DDR3_RTT_20_OHM,
  357. DDR3_RTT_120_OHM
  358. },
  359. {0, 0, 0, 0},
  360. { /* cs2 */
  361. FSL_DDR_ODT_NEVER,
  362. FSL_DDR_ODT_SAME_DIMM,
  363. DDR3_RTT_120_OHM,
  364. DDR3_RTT_OFF
  365. },
  366. { /* cs3 */
  367. FSL_DDR_ODT_OTHER_DIMM,
  368. FSL_DDR_ODT_OTHER_DIMM,
  369. DDR3_RTT_20_OHM,
  370. DDR3_RTT_OFF
  371. }
  372. };
  373. static __maybe_unused const struct dynamic_odt dual_SS[4] = {
  374. { /* cs0 */
  375. FSL_DDR_ODT_OTHER_DIMM,
  376. FSL_DDR_ODT_ALL,
  377. DDR3_RTT_30_OHM,
  378. DDR3_RTT_120_OHM
  379. },
  380. {0, 0, 0, 0},
  381. { /* cs2 */
  382. FSL_DDR_ODT_OTHER_DIMM,
  383. FSL_DDR_ODT_ALL,
  384. DDR3_RTT_30_OHM,
  385. DDR3_RTT_120_OHM
  386. },
  387. {0, 0, 0, 0}
  388. };
  389. static __maybe_unused const struct dynamic_odt dual_D0[4] = {
  390. { /* cs0 */
  391. FSL_DDR_ODT_NEVER,
  392. FSL_DDR_ODT_SAME_DIMM,
  393. DDR3_RTT_40_OHM,
  394. DDR3_RTT_OFF
  395. },
  396. { /* cs1 */
  397. FSL_DDR_ODT_NEVER,
  398. FSL_DDR_ODT_NEVER,
  399. DDR3_RTT_OFF,
  400. DDR3_RTT_OFF
  401. },
  402. {0, 0, 0, 0},
  403. {0, 0, 0, 0}
  404. };
  405. static __maybe_unused const struct dynamic_odt dual_0D[4] = {
  406. {0, 0, 0, 0},
  407. {0, 0, 0, 0},
  408. { /* cs2 */
  409. FSL_DDR_ODT_NEVER,
  410. FSL_DDR_ODT_SAME_DIMM,
  411. DDR3_RTT_40_OHM,
  412. DDR3_RTT_OFF
  413. },
  414. { /* cs3 */
  415. FSL_DDR_ODT_NEVER,
  416. FSL_DDR_ODT_NEVER,
  417. DDR3_RTT_OFF,
  418. DDR3_RTT_OFF
  419. }
  420. };
  421. static __maybe_unused const struct dynamic_odt dual_S0[4] = {
  422. { /* cs0 */
  423. FSL_DDR_ODT_NEVER,
  424. FSL_DDR_ODT_CS,
  425. DDR3_RTT_40_OHM,
  426. DDR3_RTT_OFF
  427. },
  428. {0, 0, 0, 0},
  429. {0, 0, 0, 0},
  430. {0, 0, 0, 0}
  431. };
  432. static __maybe_unused const struct dynamic_odt dual_0S[4] = {
  433. {0, 0, 0, 0},
  434. {0, 0, 0, 0},
  435. { /* cs2 */
  436. FSL_DDR_ODT_NEVER,
  437. FSL_DDR_ODT_CS,
  438. DDR3_RTT_40_OHM,
  439. DDR3_RTT_OFF
  440. },
  441. {0, 0, 0, 0}
  442. };
  443. static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
  444. { /* cs0 */
  445. FSL_DDR_ODT_NEVER,
  446. FSL_DDR_ODT_CS,
  447. DDR3_RTT_120_OHM,
  448. DDR3_RTT_OFF
  449. },
  450. { /* cs1 */
  451. FSL_DDR_ODT_NEVER,
  452. FSL_DDR_ODT_CS,
  453. DDR3_RTT_120_OHM,
  454. DDR3_RTT_OFF
  455. },
  456. { /* cs2 */
  457. FSL_DDR_ODT_NEVER,
  458. FSL_DDR_ODT_CS,
  459. DDR3_RTT_120_OHM,
  460. DDR3_RTT_OFF
  461. },
  462. { /* cs3 */
  463. FSL_DDR_ODT_NEVER,
  464. FSL_DDR_ODT_CS,
  465. DDR3_RTT_120_OHM,
  466. DDR3_RTT_OFF
  467. }
  468. };
  469. #else /* CONFIG_SYS_FSL_DDR3 */
  470. static __maybe_unused const struct dynamic_odt single_Q[4] = {
  471. {0, 0, 0, 0},
  472. {0, 0, 0, 0},
  473. {0, 0, 0, 0},
  474. {0, 0, 0, 0}
  475. };
  476. static __maybe_unused const struct dynamic_odt single_D[4] = {
  477. { /* cs0 */
  478. FSL_DDR_ODT_NEVER,
  479. FSL_DDR_ODT_ALL,
  480. DDR2_RTT_150_OHM,
  481. DDR2_RTT_OFF
  482. },
  483. { /* cs1 */
  484. FSL_DDR_ODT_NEVER,
  485. FSL_DDR_ODT_NEVER,
  486. DDR2_RTT_OFF,
  487. DDR2_RTT_OFF
  488. },
  489. {0, 0, 0, 0},
  490. {0, 0, 0, 0}
  491. };
  492. static __maybe_unused const struct dynamic_odt single_S[4] = {
  493. { /* cs0 */
  494. FSL_DDR_ODT_NEVER,
  495. FSL_DDR_ODT_ALL,
  496. DDR2_RTT_150_OHM,
  497. DDR2_RTT_OFF
  498. },
  499. {0, 0, 0, 0},
  500. {0, 0, 0, 0},
  501. {0, 0, 0, 0},
  502. };
  503. static __maybe_unused const struct dynamic_odt dual_DD[4] = {
  504. { /* cs0 */
  505. FSL_DDR_ODT_OTHER_DIMM,
  506. FSL_DDR_ODT_OTHER_DIMM,
  507. DDR2_RTT_75_OHM,
  508. DDR2_RTT_OFF
  509. },
  510. { /* cs1 */
  511. FSL_DDR_ODT_NEVER,
  512. FSL_DDR_ODT_NEVER,
  513. DDR2_RTT_OFF,
  514. DDR2_RTT_OFF
  515. },
  516. { /* cs2 */
  517. FSL_DDR_ODT_OTHER_DIMM,
  518. FSL_DDR_ODT_OTHER_DIMM,
  519. DDR2_RTT_75_OHM,
  520. DDR2_RTT_OFF
  521. },
  522. { /* cs3 */
  523. FSL_DDR_ODT_NEVER,
  524. FSL_DDR_ODT_NEVER,
  525. DDR2_RTT_OFF,
  526. DDR2_RTT_OFF
  527. }
  528. };
  529. static __maybe_unused const struct dynamic_odt dual_DS[4] = {
  530. { /* cs0 */
  531. FSL_DDR_ODT_OTHER_DIMM,
  532. FSL_DDR_ODT_OTHER_DIMM,
  533. DDR2_RTT_75_OHM,
  534. DDR2_RTT_OFF
  535. },
  536. { /* cs1 */
  537. FSL_DDR_ODT_NEVER,
  538. FSL_DDR_ODT_NEVER,
  539. DDR2_RTT_OFF,
  540. DDR2_RTT_OFF
  541. },
  542. { /* cs2 */
  543. FSL_DDR_ODT_OTHER_DIMM,
  544. FSL_DDR_ODT_OTHER_DIMM,
  545. DDR2_RTT_75_OHM,
  546. DDR2_RTT_OFF
  547. },
  548. {0, 0, 0, 0}
  549. };
  550. static __maybe_unused const struct dynamic_odt dual_SD[4] = {
  551. { /* cs0 */
  552. FSL_DDR_ODT_OTHER_DIMM,
  553. FSL_DDR_ODT_OTHER_DIMM,
  554. DDR2_RTT_75_OHM,
  555. DDR2_RTT_OFF
  556. },
  557. {0, 0, 0, 0},
  558. { /* cs2 */
  559. FSL_DDR_ODT_OTHER_DIMM,
  560. FSL_DDR_ODT_OTHER_DIMM,
  561. DDR2_RTT_75_OHM,
  562. DDR2_RTT_OFF
  563. },
  564. { /* cs3 */
  565. FSL_DDR_ODT_NEVER,
  566. FSL_DDR_ODT_NEVER,
  567. DDR2_RTT_OFF,
  568. DDR2_RTT_OFF
  569. }
  570. };
  571. static __maybe_unused const struct dynamic_odt dual_SS[4] = {
  572. { /* cs0 */
  573. FSL_DDR_ODT_OTHER_DIMM,
  574. FSL_DDR_ODT_OTHER_DIMM,
  575. DDR2_RTT_75_OHM,
  576. DDR2_RTT_OFF
  577. },
  578. {0, 0, 0, 0},
  579. { /* cs2 */
  580. FSL_DDR_ODT_OTHER_DIMM,
  581. FSL_DDR_ODT_OTHER_DIMM,
  582. DDR2_RTT_75_OHM,
  583. DDR2_RTT_OFF
  584. },
  585. {0, 0, 0, 0}
  586. };
  587. static __maybe_unused const struct dynamic_odt dual_D0[4] = {
  588. { /* cs0 */
  589. FSL_DDR_ODT_NEVER,
  590. FSL_DDR_ODT_ALL,
  591. DDR2_RTT_150_OHM,
  592. DDR2_RTT_OFF
  593. },
  594. { /* cs1 */
  595. FSL_DDR_ODT_NEVER,
  596. FSL_DDR_ODT_NEVER,
  597. DDR2_RTT_OFF,
  598. DDR2_RTT_OFF
  599. },
  600. {0, 0, 0, 0},
  601. {0, 0, 0, 0}
  602. };
  603. static __maybe_unused const struct dynamic_odt dual_0D[4] = {
  604. {0, 0, 0, 0},
  605. {0, 0, 0, 0},
  606. { /* cs2 */
  607. FSL_DDR_ODT_NEVER,
  608. FSL_DDR_ODT_ALL,
  609. DDR2_RTT_150_OHM,
  610. DDR2_RTT_OFF
  611. },
  612. { /* cs3 */
  613. FSL_DDR_ODT_NEVER,
  614. FSL_DDR_ODT_NEVER,
  615. DDR2_RTT_OFF,
  616. DDR2_RTT_OFF
  617. }
  618. };
  619. static __maybe_unused const struct dynamic_odt dual_S0[4] = {
  620. { /* cs0 */
  621. FSL_DDR_ODT_NEVER,
  622. FSL_DDR_ODT_CS,
  623. DDR2_RTT_150_OHM,
  624. DDR2_RTT_OFF
  625. },
  626. {0, 0, 0, 0},
  627. {0, 0, 0, 0},
  628. {0, 0, 0, 0}
  629. };
  630. static __maybe_unused const struct dynamic_odt dual_0S[4] = {
  631. {0, 0, 0, 0},
  632. {0, 0, 0, 0},
  633. { /* cs2 */
  634. FSL_DDR_ODT_NEVER,
  635. FSL_DDR_ODT_CS,
  636. DDR2_RTT_150_OHM,
  637. DDR2_RTT_OFF
  638. },
  639. {0, 0, 0, 0}
  640. };
  641. static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
  642. { /* cs0 */
  643. FSL_DDR_ODT_NEVER,
  644. FSL_DDR_ODT_CS,
  645. DDR2_RTT_75_OHM,
  646. DDR2_RTT_OFF
  647. },
  648. { /* cs1 */
  649. FSL_DDR_ODT_NEVER,
  650. FSL_DDR_ODT_NEVER,
  651. DDR2_RTT_OFF,
  652. DDR2_RTT_OFF
  653. },
  654. { /* cs2 */
  655. FSL_DDR_ODT_NEVER,
  656. FSL_DDR_ODT_CS,
  657. DDR2_RTT_75_OHM,
  658. DDR2_RTT_OFF
  659. },
  660. { /* cs3 */
  661. FSL_DDR_ODT_NEVER,
  662. FSL_DDR_ODT_NEVER,
  663. DDR2_RTT_OFF,
  664. DDR2_RTT_OFF
  665. }
  666. };
  667. #endif
  668. /*
  669. * Automatically select the bank interleaving mode based on DIMMs,
  670. * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
  671. * This function only deals with one or two slots per controller.
  672. */
  673. static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
  674. {
  675. #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
  676. if (pdimm[0].n_ranks == 4)
  677. return FSL_DDR_CS0_CS1_CS2_CS3;
  678. else if (pdimm[0].n_ranks == 2)
  679. return FSL_DDR_CS0_CS1;
  680. #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
  681. #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
  682. if (pdimm[0].n_ranks == 4)
  683. return FSL_DDR_CS0_CS1_CS2_CS3;
  684. #endif
  685. if (pdimm[0].n_ranks == 2) {
  686. if (pdimm[1].n_ranks == 2)
  687. return FSL_DDR_CS0_CS1_CS2_CS3;
  688. else
  689. return FSL_DDR_CS0_CS1;
  690. }
  691. #endif
  692. return 0;
  693. }
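/*
 * Editorial worked example: with one slot per controller and a single
 * dual-rank DIMM installed, auto_bank_intlv() returns FSL_DDR_CS0_CS1;
 * with two slots that each hold a dual-rank DIMM it returns
 * FSL_DDR_CS0_CS1_CS2_CS3. Its result is fed into popts->ba_intlv_ctl by
 * the "bank_intlv=auto" hwconfig handling later in this file.
 */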
  694. unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
  695. memctl_options_t *popts,
  696. dimm_params_t *pdimm,
  697. unsigned int ctrl_num)
  698. {
  699. unsigned int i;
  700. char buffer[HWCONFIG_BUFFER_SIZE];
  701. char *buf = NULL;
  702. #if defined(CONFIG_SYS_FSL_DDR3) || \
  703. defined(CONFIG_SYS_FSL_DDR2) || \
  704. defined(CONFIG_SYS_FSL_DDR4)
  705. const struct dynamic_odt *pdodt = odt_unknown;
  706. #endif
  707. #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
  708. ulong ddr_freq;
  709. #endif
  710. /*
  711. * Extract hwconfig from the environment since we have not properly set up
  712. * the environment yet but need it for the DDR config params
  713. */
  714. if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
  715. buf = buffer;
  716. #if defined(CONFIG_SYS_FSL_DDR3) || \
  717. defined(CONFIG_SYS_FSL_DDR2) || \
  718. defined(CONFIG_SYS_FSL_DDR4)
  719. /* Chip select options. */
  720. #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
  721. switch (pdimm[0].n_ranks) {
  722. case 1:
  723. pdodt = single_S;
  724. break;
  725. case 2:
  726. pdodt = single_D;
  727. break;
  728. case 4:
  729. pdodt = single_Q;
  730. break;
  731. }
  732. #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
  733. switch (pdimm[0].n_ranks) {
  734. #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
  735. case 4:
  736. pdodt = single_Q;
  737. if (pdimm[1].n_ranks)
  738. printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
  739. break;
  740. #endif
  741. case 2:
  742. switch (pdimm[1].n_ranks) {
  743. case 2:
  744. pdodt = dual_DD;
  745. break;
  746. case 1:
  747. pdodt = dual_DS;
  748. break;
  749. case 0:
  750. pdodt = dual_D0;
  751. break;
  752. }
  753. break;
  754. case 1:
  755. switch (pdimm[1].n_ranks) {
  756. case 2:
  757. pdodt = dual_SD;
  758. break;
  759. case 1:
  760. pdodt = dual_SS;
  761. break;
  762. case 0:
  763. pdodt = dual_S0;
  764. break;
  765. }
  766. break;
  767. case 0:
  768. switch (pdimm[1].n_ranks) {
  769. case 2:
  770. pdodt = dual_0D;
  771. break;
  772. case 1:
  773. pdodt = dual_0S;
  774. break;
  775. }
  776. break;
  777. }
  778. #endif /* CONFIG_DIMM_SLOTS_PER_CTLR */
  779. #endif /* CONFIG_SYS_FSL_DDR2, 3, 4 */
  780. /* Pick chip-select local options. */
  781. for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
  782. #if defined(CONFIG_SYS_FSL_DDR3) || \
  783. defined(CONFIG_SYS_FSL_DDR2) || \
  784. defined(CONFIG_SYS_FSL_DDR4)
  785. popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
  786. popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
  787. popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
  788. popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
  789. #else
  790. popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
  791. popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
  792. #endif
  793. popts->cs_local_opts[i].auto_precharge = 0;
  794. }
  795. /* Pick interleaving mode. */
  796. /*
  797. * 0 = no interleaving
  798. * 1 = interleaving between 2 controllers
  799. */
  800. popts->memctl_interleaving = 0;
  801. /*
  802. * 0 = cacheline
  803. * 1 = page
  804. * 2 = (logical) bank
  805. * 3 = superbank (only if CS interleaving is enabled)
  806. */
  807. popts->memctl_interleaving_mode = 0;
  808. /*
  809. * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
  810. * 1: page: bit to the left of the column bits selects the memctl
  811. * 2: bank: bit to the left of the bank bits selects the memctl
  812. * 3: superbank: bit to the left of the chip select selects the memctl
  813. *
  814. * NOTE: ba_intlv (rank interleaving) is independent of memory
  815. * controller interleaving; it is only within a memory controller.
  816. * Must use superbank interleaving if rank interleaving is used and
  817. * memory controller interleaving is enabled.
  818. */
  819. /*
  820. * 0 = no
  821. * 0x40 = CS0,CS1
  822. * 0x20 = CS2,CS3
  823. * 0x60 = CS0,CS1 + CS2,CS3
  824. * 0x04 = CS0,CS1,CS2,CS3
  825. */
  826. popts->ba_intlv_ctl = 0;
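/*
 * Editorial worked example (based on the encodings listed above): enabling
 * CS0+CS1 (0x40) together with CS2+CS3 (0x20) gives 0x60, which is the
 * combined CS0,CS1 + CS2,CS3 mode that the "bank_intlv=cs0_cs1_and_cs2_cs3"
 * hwconfig option selects later in this function.
 */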
  827. /* Memory Organization Parameters */
  828. popts->registered_dimm_en = common_dimm->all_dimms_registered;
  829. /* Operational Mode Parameters */
  830. /* Pick ECC modes */
  831. popts->ecc_mode = 0; /* 0 = disabled, 1 = enabled */
  832. #ifdef CONFIG_DDR_ECC
  833. if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
  834. if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
  835. popts->ecc_mode = 1;
  836. } else
  837. popts->ecc_mode = 1;
  838. #endif
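/*
 * Editorial example (assumed environment setting, not from this file): with
 * "hwconfig=fsl_ddr:ecc=off" the "ecc" sub-option is present but not "on",
 * so the block above leaves popts->ecc_mode at 0; omitting the sub-option
 * altogether keeps the default of ECC enabled.
 */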
  839. /* 1 = use memory controller to init data */
  840. popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;
  841. /*
  842. * Choose DQS config
  843. * 0 for DDR1
  844. * 1 for DDR2
  845. */
  846. #if defined(CONFIG_SYS_FSL_DDR1)
  847. popts->dqs_config = 0;
  848. #elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
  849. popts->dqs_config = 1;
  850. #endif
  851. /* Choose self-refresh during sleep. */
  852. popts->self_refresh_in_sleep = 1;
  853. /* Choose dynamic power management mode. */
  854. popts->dynamic_power = 0;
  855. /*
  856. * check first dimm for primary sdram width
  857. * presuming all dimms are similar
  858. * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
  859. */
  860. #if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
  861. if (pdimm[0].n_ranks != 0) {
  862. if ((pdimm[0].data_width >= 64) && \
  863. (pdimm[0].data_width <= 72))
  864. popts->data_bus_width = 0;
  865. else if ((pdimm[0].data_width >= 32) && \
  866. (pdimm[0].data_width <= 40))
  867. popts->data_bus_width = 1;
  868. else {
  869. panic("Error: data width %u is invalid!\n",
  870. pdimm[0].data_width);
  871. }
  872. }
  873. #else
  874. if (pdimm[0].n_ranks != 0) {
  875. if (pdimm[0].primary_sdram_width == 64)
  876. popts->data_bus_width = 0;
  877. else if (pdimm[0].primary_sdram_width == 32)
  878. popts->data_bus_width = 1;
  879. else if (pdimm[0].primary_sdram_width == 16)
  880. popts->data_bus_width = 2;
  881. else {
  882. panic("Error: primary sdram width %u is invalid!\n",
  883. pdimm[0].primary_sdram_width);
  884. }
  885. }
  886. #endif
  887. popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;
  888. /* Choose burst length. */
  889. #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
  890. #if defined(CONFIG_E500MC)
  891. popts->otf_burst_chop_en = 0; /* on-the-fly burst chop disable */
  892. popts->burst_length = DDR_BL8; /* Fixed 8-beat burst len */
  893. #else
  894. if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
  895. /* 32-bit or 16-bit bus */
  896. popts->otf_burst_chop_en = 0;
  897. popts->burst_length = DDR_BL8;
  898. } else {
  899. popts->otf_burst_chop_en = 1; /* on-the-fly burst chop */
  900. popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
  901. }
  902. #endif
  903. #else
  904. popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
  905. #endif
  906. /* Choose ddr controller address mirror mode */
  907. #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
  908. for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
  909. if (pdimm[i].n_ranks) {
  910. popts->mirrored_dimm = pdimm[i].mirrored_dimm;
  911. break;
  912. }
  913. }
  914. #endif
  915. /* Global Timing Parameters. */
  916. debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));
  917. /* Pick a caslat override. */
  918. popts->cas_latency_override = 0;
  919. popts->cas_latency_override_value = 3;
  920. if (popts->cas_latency_override) {
  921. debug("using caslat override value = %u\n",
  922. popts->cas_latency_override_value);
  923. }
  924. /* Decide whether to use the computed derated latency */
  925. popts->use_derated_caslat = 0;
  926. /* Choose an additive latency. */
  927. popts->additive_latency_override = 0;
  928. popts->additive_latency_override_value = 3;
  929. if (popts->additive_latency_override) {
  930. debug("using additive latency override value = %u\n",
  931. popts->additive_latency_override_value);
  932. }
  933. /*
  934. * 2T_EN setting
  935. *
  936. * Factors to consider for 2T_EN:
  937. * - number of DIMMs installed
  938. * - number of components, number of active ranks
  939. * - how much time you want to spend playing around
  940. */
  941. popts->twot_en = 0;
  942. popts->threet_en = 0;
  943. /* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
  944. if (popts->registered_dimm_en)
  945. popts->ap_en = 1; /* 0 = disable, 1 = enable */
  946. else
  947. popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */
  948. if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
  949. if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
  950. if (popts->registered_dimm_en ||
  951. (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
  952. popts->ap_en = 1;
  953. }
  954. }
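/*
 * Editorial example (assumed environment setting): "hwconfig=fsl_ddr:parity=on"
 * requests address parity, which the check above honors only for registered
 * DIMMs or DDR4.
 */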
  955. /*
  956. * BSTTOPRE precharge interval
  957. *
  958. * Set this to 0 for global auto precharge.
  959. * A value of 0x100 has been used for DDR1, DDR2 and DDR3; it is not
  960. * wrong, and any value should work. Performance depends on the
  961. * application, so there is no single value that is best for all cases.
  962. * One approach is to use 1/4 of the refresh interval (refint).
  963. */
  964. popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
  965. >> 2;
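/*
 * Editorial worked example (assumed figures, not from this file): with a
 * 7.8 us refresh interval (refresh_rate_ps = 7800000) and a 1250 ps memory
 * clock (DDR3-1600), picos_to_mclk() yields about 6240 clocks, so bstopre
 * ends up around 6240 >> 2 = 1560.
 */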
  966. /*
  967. * Window for four activates -- tFAW
  968. *
  969. * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
  970. * FIXME: varies depending upon number of column addresses or data
  971. * FIXME: width, was considering looking at pdimm->primary_sdram_width
  972. */
  973. #if defined(CONFIG_SYS_FSL_DDR1)
  974. popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
  975. #elif defined(CONFIG_SYS_FSL_DDR2)
  976. /*
  977. * x4/x8; some datasheets have 35000
  978. * x16 wide columns only? Use 50000?
  979. */
  980. popts->tfaw_window_four_activates_ps = 37500;
  981. #else
  982. popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
  983. #endif
  984. popts->zq_en = 0;
  985. popts->wrlvl_en = 0;
  986. #if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
  987. /*
  988. * Because DDR3 DIMMs use a fly-by topology,
  989. * we suggest enabling write leveling to
  990. * meet tQDSS under different loading.
  991. */
  992. popts->wrlvl_en = 1;
  993. popts->zq_en = 1;
  994. popts->wrlvl_override = 0;
  995. #endif
  996. /*
  997. * Check the interleaving configuration from the environment.
  998. * Please refer to doc/README.fsl-ddr for details.
  999. *
  1000. * If memory controller interleaving is enabled, then the data
  1001. * bus widths must be programmed identically for all memory controllers.
  1002. *
  1003. * Attempt to set all controllers to the same chip-select
  1004. * interleaving mode. A best effort is made to get the
  1005. * requested ranks interleaved together such that the result
  1006. * is a subset of the requested configuration.
  1007. *
  1008. * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte
  1009. * interleaving is mandatory and enabled unconditionally.
  1010. */
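/*
 * Editorial example (assumed environment setting, see doc/README.fsl-ddr):
 * an environment such as
 *   hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1
 * selects cache-line interleaving across controllers and CS0+CS1 bank (rank)
 * interleaving within each controller via the parsing below.
 */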
  1011. #if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
  1012. if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
  1013. #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
  1014. ;
  1015. #else
  1016. goto done;
  1017. #endif
  1018. if (pdimm[0].n_ranks == 0) {
  1019. printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
  1020. popts->memctl_interleaving = 0;
  1021. goto done;
  1022. }
  1023. popts->memctl_interleaving = 1;
  1024. #ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
  1025. popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
  1026. popts->memctl_interleaving = 1;
  1027. debug("256 Byte interleaving\n");
  1028. #else
  1029. /*
  1030. * Test for "null" first. If CONFIG_HWCONFIG is not defined,
  1031. * hwconfig_subarg_cmp_f() returns non-zero.
  1032. */
  1033. if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
  1034. "null", buf)) {
  1035. popts->memctl_interleaving = 0;
  1036. debug("memory controller interleaving disabled.\n");
  1037. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1038. "ctlr_intlv",
  1039. "cacheline", buf)) {
  1040. popts->memctl_interleaving_mode =
  1041. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1042. 0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
  1043. popts->memctl_interleaving =
  1044. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1045. 0 : 1;
  1046. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1047. "ctlr_intlv",
  1048. "page", buf)) {
  1049. popts->memctl_interleaving_mode =
  1050. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1051. 0 : FSL_DDR_PAGE_INTERLEAVING;
  1052. popts->memctl_interleaving =
  1053. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1054. 0 : 1;
  1055. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1056. "ctlr_intlv",
  1057. "bank", buf)) {
  1058. popts->memctl_interleaving_mode =
  1059. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1060. 0 : FSL_DDR_BANK_INTERLEAVING;
  1061. popts->memctl_interleaving =
  1062. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1063. 0 : 1;
  1064. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1065. "ctlr_intlv",
  1066. "superbank", buf)) {
  1067. popts->memctl_interleaving_mode =
  1068. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1069. 0 : FSL_DDR_SUPERBANK_INTERLEAVING;
  1070. popts->memctl_interleaving =
  1071. ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
  1072. 0 : 1;
  1073. #if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
  1074. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1075. "ctlr_intlv",
  1076. "3way_1KB", buf)) {
  1077. popts->memctl_interleaving_mode =
  1078. FSL_DDR_3WAY_1KB_INTERLEAVING;
  1079. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1080. "ctlr_intlv",
  1081. "3way_4KB", buf)) {
  1082. popts->memctl_interleaving_mode =
  1083. FSL_DDR_3WAY_4KB_INTERLEAVING;
  1084. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1085. "ctlr_intlv",
  1086. "3way_8KB", buf)) {
  1087. popts->memctl_interleaving_mode =
  1088. FSL_DDR_3WAY_8KB_INTERLEAVING;
  1089. #elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
  1090. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1091. "ctlr_intlv",
  1092. "4way_1KB", buf)) {
  1093. popts->memctl_interleaving_mode =
  1094. FSL_DDR_4WAY_1KB_INTERLEAVING;
  1095. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1096. "ctlr_intlv",
  1097. "4way_4KB", buf)) {
  1098. popts->memctl_interleaving_mode =
  1099. FSL_DDR_4WAY_4KB_INTERLEAVING;
  1100. } else if (hwconfig_subarg_cmp_f("fsl_ddr",
  1101. "ctlr_intlv",
  1102. "4way_8KB", buf)) {
  1103. popts->memctl_interleaving_mode =
  1104. FSL_DDR_4WAY_8KB_INTERLEAVING;
  1105. #endif
  1106. } else {
  1107. popts->memctl_interleaving = 0;
  1108. printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
  1109. }
  1110. #endif /* CONFIG_SYS_FSL_DDR_INTLV_256B */
  1111. done:
  1112. #endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
  1113. if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
  1114. (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
  1115. /* Test for "null" first. If CONFIG_HWCONFIG is not defined,
  1116. * hwconfig_subarg_cmp_f() returns non-zero. */
  1117. if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1118. "null", buf))
  1119. debug("bank interleaving disabled.\n");
  1120. else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1121. "cs0_cs1", buf))
  1122. popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
  1123. else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1124. "cs2_cs3", buf))
  1125. popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
  1126. else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1127. "cs0_cs1_and_cs2_cs3", buf))
  1128. popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
  1129. else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1130. "cs0_cs1_cs2_cs3", buf))
  1131. popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
  1132. else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
  1133. "auto", buf))
  1134. popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
  1135. else
  1136. printf("hwconfig has unrecognized parameter for bank_intlv.\n");
  1137. switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
  1138. case FSL_DDR_CS0_CS1_CS2_CS3:
  1139. #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
  1140. if (pdimm[0].n_ranks < 4) {
  1141. popts->ba_intlv_ctl = 0;
  1142. printf("Not enough bank(chip-select) for "
  1143. "CS0+CS1+CS2+CS3 on controller %d, "
  1144. "interleaving disabled!\n", ctrl_num);
  1145. }
  1146. #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
  1147. #ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
  1148. if (pdimm[0].n_ranks == 4)
  1149. break;
  1150. #endif
  1151. if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
  1152. popts->ba_intlv_ctl = 0;
  1153. printf("Not enough bank(chip-select) for "
  1154. "CS0+CS1+CS2+CS3 on controller %d, "
  1155. "interleaving disabled!\n", ctrl_num);
  1156. }
  1157. if (pdimm[0].capacity != pdimm[1].capacity) {
  1158. popts->ba_intlv_ctl = 0;
  1159. printf("Not identical DIMM size for "
  1160. "CS0+CS1+CS2+CS3 on controller %d, "
  1161. "interleaving disabled!\n", ctrl_num);
  1162. }
  1163. #endif
  1164. break;
  1165. case FSL_DDR_CS0_CS1:
  1166. if (pdimm[0].n_ranks < 2) {
  1167. popts->ba_intlv_ctl = 0;
  1168. printf("Not enough bank(chip-select) for "
  1169. "CS0+CS1 on controller %d, "
  1170. "interleaving disabled!\n", ctrl_num);
  1171. }
  1172. break;
  1173. case FSL_DDR_CS2_CS3:
  1174. #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
  1175. if (pdimm[0].n_ranks < 4) {
  1176. popts->ba_intlv_ctl = 0;
  1177. printf("Not enough bank(chip-select) for CS2+CS3 "
  1178. "on controller %d, interleaving disabled!\n", ctrl_num);
  1179. }
  1180. #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
  1181. if (pdimm[1].n_ranks < 2) {
  1182. popts->ba_intlv_ctl = 0;
  1183. printf("Not enough bank(chip-select) for CS2+CS3 "
  1184. "on controller %d, interleaving disabled!\n", ctrl_num);
  1185. }
  1186. #endif
  1187. break;
  1188. case FSL_DDR_CS0_CS1_AND_CS2_CS3:
  1189. #if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
  1190. if (pdimm[0].n_ranks < 4) {
  1191. popts->ba_intlv_ctl = 0;
  1192. printf("Not enough bank(CS) for CS0+CS1 and "
  1193. "CS2+CS3 on controller %d, "
  1194. "interleaving disabled!\n", ctrl_num);
  1195. }
  1196. #elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
  1197. if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
  1198. popts->ba_intlv_ctl = 0;
  1199. printf("Not enough bank(CS) for CS0+CS1 and "
  1200. "CS2+CS3 on controller %d, "
  1201. "interleaving disabled!\n", ctrl_num);
  1202. }
  1203. #endif
  1204. break;
  1205. default:
  1206. popts->ba_intlv_ctl = 0;
  1207. break;
  1208. }
  1209. }
  1210. if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
  1211. if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
  1212. popts->addr_hash = 0;
  1213. else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
  1214. "true", buf))
  1215. popts->addr_hash = 1;
  1216. }
  1217. if (pdimm[0].n_ranks == 4)
  1218. popts->quad_rank_present = 1;
  1219. popts->package_3ds = pdimm->package_3ds;
  1220. #if (CONFIG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
  1221. ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
  1222. if (popts->registered_dimm_en) {
  1223. popts->rcw_override = 1;
  1224. popts->rcw_1 = 0x000a5a00;
  1225. if (ddr_freq <= 800)
  1226. popts->rcw_2 = 0x00000000;
  1227. else if (ddr_freq <= 1066)
  1228. popts->rcw_2 = 0x00100000;
  1229. else if (ddr_freq <= 1333)
  1230. popts->rcw_2 = 0x00200000;
  1231. else
  1232. popts->rcw_2 = 0x00300000;
  1233. }
  1234. #endif
  1235. fsl_ddr_board_options(popts, pdimm, ctrl_num);
  1236. return 0;
  1237. }
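/*
 * Editorial usage sketch (hypothetical names, not part of this file): a
 * caller would fill in the DIMM and common timing parameters from SPD first,
 * then request the per-controller options computed above.
 */
#if 0
	common_timing_params_t common_dimm;	/* computed from SPD elsewhere */
	dimm_params_t dimms[CONFIG_DIMM_SLOTS_PER_CTLR];
	memctl_options_t opts;

	populate_memctl_options(&common_dimm, &opts, dimms, 0 /* ctrl_num */);
#endif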
  1238. void check_interleaving_options(fsl_ddr_info_t *pinfo)
  1239. {
  1240. int i, j, k, check_n_ranks, intlv_invalid = 0;
  1241. unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
  1242. unsigned long long check_rank_density;
  1243. struct dimm_params_s *dimm;
  1244. int first_ctrl = pinfo->first_ctrl;
  1245. int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;
  1246. /*
  1247. * Check whether all controllers are configured for memory
  1248. * controller interleaving. Identical DIMMs are recommended; at a
  1249. * minimum, the size and the row and column address widths should match.
  1250. */
  1251. j = 0;
  1252. check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
  1253. check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
  1254. check_n_row_addr = pinfo->dimm_params[first_ctrl][0].n_row_addr;
  1255. check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
  1256. check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
  1257. for (i = first_ctrl; i <= last_ctrl; i++) {
  1258. dimm = &pinfo->dimm_params[i][0];
  1259. if (!pinfo->memctl_opts[i].memctl_interleaving) {
  1260. continue;
  1261. } else if (((check_rank_density != dimm->rank_density) ||
  1262. (check_n_ranks != dimm->n_ranks) ||
  1263. (check_n_row_addr != dimm->n_row_addr) ||
  1264. (check_n_col_addr != dimm->n_col_addr) ||
  1265. (check_intlv !=
  1266. pinfo->memctl_opts[i].memctl_interleaving_mode))){
  1267. intlv_invalid = 1;
  1268. break;
  1269. } else {
  1270. j++;
  1271. }
  1272. }
  1273. if (intlv_invalid) {
  1274. for (i = first_ctrl; i <= last_ctrl; i++)
  1275. pinfo->memctl_opts[i].memctl_interleaving = 0;
  1276. printf("Not all DIMMs are identical. "
  1277. "Memory controller interleaving disabled.\n");
  1278. } else {
  1279. switch (check_intlv) {
  1280. case FSL_DDR_256B_INTERLEAVING:
  1281. case FSL_DDR_CACHE_LINE_INTERLEAVING:
  1282. case FSL_DDR_PAGE_INTERLEAVING:
  1283. case FSL_DDR_BANK_INTERLEAVING:
  1284. case FSL_DDR_SUPERBANK_INTERLEAVING:
  1285. #if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
  1286. k = 2;
  1287. #else
  1288. k = CONFIG_SYS_NUM_DDR_CTLRS;
  1289. #endif
  1290. break;
  1291. case FSL_DDR_3WAY_1KB_INTERLEAVING:
  1292. case FSL_DDR_3WAY_4KB_INTERLEAVING:
  1293. case FSL_DDR_3WAY_8KB_INTERLEAVING:
  1294. case FSL_DDR_4WAY_1KB_INTERLEAVING:
  1295. case FSL_DDR_4WAY_4KB_INTERLEAVING:
  1296. case FSL_DDR_4WAY_8KB_INTERLEAVING:
  1297. default:
  1298. k = CONFIG_SYS_NUM_DDR_CTLRS;
  1299. break;
  1300. }
  1301. debug("%d of %d controllers are interleaving.\n", j, k);
  1302. if (j && (j != k)) {
  1303. for (i = first_ctrl; i <= last_ctrl; i++)
  1304. pinfo->memctl_opts[i].memctl_interleaving = 0;
  1305. if ((last_ctrl - first_ctrl) > 1)
  1306. puts("Not all controllers have compatible interleaving mode. All disabled.\n");
  1307. }
  1308. }
  1309. debug("Checking interleaving options completed\n");
  1310. }
  1311. int fsl_use_spd(void)
  1312. {
  1313. int use_spd = 0;
  1314. #ifdef CONFIG_DDR_SPD
  1315. char buffer[HWCONFIG_BUFFER_SIZE];
  1316. char *buf = NULL;
  1317. /*
  1318. * Extract hwconfig from the environment since we have not properly set up
  1319. * the environment yet but need it for the DDR config params
  1320. */
  1321. if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
  1322. buf = buffer;
  1323. /* if hwconfig is not enabled, or "sdram" is not defined, use spd */
  1324. if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
  1325. if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
  1326. use_spd = 1;
  1327. else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
  1328. "fixed", buf))
  1329. use_spd = 0;
  1330. else
  1331. use_spd = 1;
  1332. } else
  1333. use_spd = 1;
  1334. #endif
  1335. return use_spd;
  1336. }
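/*
 * Editorial example (assumed environment settings): "hwconfig=fsl_ddr:sdram=spd"
 * (or leaving the sub-option out entirely) makes fsl_use_spd() return 1, while
 * "hwconfig=fsl_ddr:sdram=fixed" selects the board's fixed DDR configuration.
 */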