options.c

/*
 * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
 * Copyright 2017-2018 NXP Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <hwconfig.h>
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
    defined(CONFIG_ARM)
#include <asm/arch/clock.h>
#endif
/*
 * Use our own stack-based buffer before relocation to allow accessing longer
 * hwconfig strings that might be in the environment before we've relocated.
 * This is pretty fragile on both the use of stack and if the buffer is big
 * enough. However we will get a warning from env_get_f() for the latter.
 */
#define HWCONFIG_BUFFER_SIZE    128
/* Board-specific functions defined in each board's ddr.c */
extern void fsl_ddr_board_options(memctl_options_t *popts,
                                  dimm_params_t *pdimm,
                                  unsigned int ctrl_num);

struct dynamic_odt {
    unsigned int odt_rd_cfg;
    unsigned int odt_wr_cfg;
    unsigned int odt_rtt_norm;
    unsigned int odt_rtt_wr;
};
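
/*
 * The dynamic ODT tables below are indexed by chip select. Naming
 * convention: single_/dual_ is the number of DIMM slots per controller,
 * and the letters give the rank count per slot -- S = single-rank,
 * D = dual-rank, Q = quad-rank, 0 = empty slot.
 */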
#ifdef CONFIG_SYS_FSL_DDR4
/* Quad rank is not verified yet due to limited availability.
 * Replacing 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option
 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS_AND_OTHER_DIMM,
        DDR4_RTT_34_OHM,    /* unverified */
        DDR4_RTT_120_OHM
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR4_RTT_OFF,
        DDR4_RTT_120_OHM
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS_AND_OTHER_DIMM,
        DDR4_RTT_34_OHM,
        DDR4_RTT_120_OHM
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,  /* tied high */
        DDR4_RTT_OFF,
        DDR4_RTT_120_OHM
    }
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR4_RTT_OFF,
        DDR4_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR4_RTT_34_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR4_RTT_34_OHM,
        DDR4_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR4_RTT_34_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_34_OHM,
        DDR4_RTT_120_OHM
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_34_OHM,
        DDR4_RTT_120_OHM
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR4_RTT_34_OHM,
        DDR4_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_34_OHM,
        DDR4_RTT_120_OHM
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR4_RTT_34_OHM,
        DDR4_RTT_120_OHM
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR4_RTT_OFF,
        DDR4_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR4_RTT_OFF,
        DDR4_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_40_OHM,
        DDR4_RTT_OFF
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR4_RTT_120_OHM,
        DDR4_RTT_OFF
    }
};
#elif defined(CONFIG_SYS_FSL_DDR3)
static __maybe_unused const struct dynamic_odt single_Q[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS_AND_OTHER_DIMM,
        DDR3_RTT_20_OHM,
        DDR3_RTT_120_OHM
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,  /* tied high */
        DDR3_RTT_OFF,
        DDR3_RTT_120_OHM
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS_AND_OTHER_DIMM,
        DDR3_RTT_20_OHM,
        DDR3_RTT_120_OHM
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,  /* tied high */
        DDR3_RTT_OFF,
        DDR3_RTT_120_OHM
    }
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR3_RTT_OFF,
        DDR3_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR3_RTT_30_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR3_RTT_30_OHM,
        DDR3_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR3_RTT_30_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_20_OHM,
        DDR3_RTT_120_OHM
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_20_OHM,
        DDR3_RTT_120_OHM
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR3_RTT_20_OHM,
        DDR3_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_30_OHM,
        DDR3_RTT_120_OHM
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_ALL,
        DDR3_RTT_30_OHM,
        DDR3_RTT_120_OHM
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR3_RTT_OFF,
        DDR3_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_SAME_DIMM,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR3_RTT_OFF,
        DDR3_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_40_OHM,
        DDR3_RTT_OFF
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR3_RTT_120_OHM,
        DDR3_RTT_OFF
    }
};
#else /* CONFIG_SYS_FSL_DDR3 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_OTHER_DIMM,
        FSL_DDR_ODT_OTHER_DIMM,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_ALL,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    }
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
    {0, 0, 0, 0},
    {0, 0, 0, 0},
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR2_RTT_150_OHM,
        DDR2_RTT_OFF
    },
    {0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
    {   /* cs0 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs1 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    },
    {   /* cs2 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_CS,
        DDR2_RTT_75_OHM,
        DDR2_RTT_OFF
    },
    {   /* cs3 */
        FSL_DDR_ODT_NEVER,
        FSL_DDR_ODT_NEVER,
        DDR2_RTT_OFF,
        DDR2_RTT_OFF
    }
};
#endif

/*
 * Automatically select a bank interleaving mode based on the installed DIMMs,
 * trying in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 * It is used when hwconfig requests bank_intlv=auto.
 */
static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
{
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
    if (pdimm[0].n_ranks == 4)
        return FSL_DDR_CS0_CS1_CS2_CS3;
    else if (pdimm[0].n_ranks == 2)
        return FSL_DDR_CS0_CS1;
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
    if (pdimm[0].n_ranks == 4)
        return FSL_DDR_CS0_CS1_CS2_CS3;
#endif
    if (pdimm[0].n_ranks == 2) {
        if (pdimm[1].n_ranks == 2)
            return FSL_DDR_CS0_CS1_CS2_CS3;
        else
            return FSL_DDR_CS0_CS1;
    }
#endif
    return 0;
}

unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
                                     memctl_options_t *popts,
                                     dimm_params_t *pdimm,
                                     unsigned int ctrl_num)
{
    unsigned int i;
    char buffer[HWCONFIG_BUFFER_SIZE];
    char *buf = NULL;
#if defined(CONFIG_SYS_FSL_DDR3) || \
    defined(CONFIG_SYS_FSL_DDR2) || \
    defined(CONFIG_SYS_FSL_DDR4)
    const struct dynamic_odt *pdodt = odt_unknown;
#endif
    ulong ddr_freq;

    /*
     * Extract hwconfig from the environment, since we have not properly set
     * up the environment yet but need it for the DDR config parameters.
     */
    if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
        buf = buffer;
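
    /*
     * Illustrative example (not part of this file): the sub-options parsed
     * below are typically supplied through an environment setting such as
     *   hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=auto,ecc=on
     * See doc/README.fsl-ddr for the full syntax.
     */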

#if defined(CONFIG_SYS_FSL_DDR3) || \
    defined(CONFIG_SYS_FSL_DDR2) || \
    defined(CONFIG_SYS_FSL_DDR4)
    /* Chip select options. */
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
    switch (pdimm[0].n_ranks) {
    case 1:
        pdodt = single_S;
        break;
    case 2:
        pdodt = single_D;
        break;
    case 4:
        pdodt = single_Q;
        break;
    }
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
    switch (pdimm[0].n_ranks) {
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
    case 4:
        pdodt = single_Q;
        if (pdimm[1].n_ranks)
            printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
        break;
#endif
    case 2:
        switch (pdimm[1].n_ranks) {
        case 2:
            pdodt = dual_DD;
            break;
        case 1:
            pdodt = dual_DS;
            break;
        case 0:
            pdodt = dual_D0;
            break;
        }
        break;
    case 1:
        switch (pdimm[1].n_ranks) {
        case 2:
            pdodt = dual_SD;
            break;
        case 1:
            pdodt = dual_SS;
            break;
        case 0:
            pdodt = dual_S0;
            break;
        }
        break;
    case 0:
        switch (pdimm[1].n_ranks) {
        case 2:
            pdodt = dual_0D;
            break;
        case 1:
            pdodt = dual_0S;
            break;
        }
        break;
    }
#endif /* CONFIG_DIMM_SLOTS_PER_CTLR */
#endif /* CONFIG_SYS_FSL_DDR2, 3, 4 */

    /* Pick chip-select local options. */
    for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
#if defined(CONFIG_SYS_FSL_DDR3) || \
    defined(CONFIG_SYS_FSL_DDR2) || \
    defined(CONFIG_SYS_FSL_DDR4)
        popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
        popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
        popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
        popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
#else
        popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
        popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
#endif
        popts->cs_local_opts[i].auto_precharge = 0;
    }

    /* Pick interleaving mode. */

    /*
     * 0 = no interleaving
     * 1 = interleaving between 2 controllers
     */
    popts->memctl_interleaving = 0;

    /*
     * 0 = cacheline
     * 1 = page
     * 2 = (logical) bank
     * 3 = superbank (only if CS interleaving is enabled)
     */
    popts->memctl_interleaving_mode = 0;

    /*
     * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
     * 1: page: bit to the left of the column bits selects the memctl
     * 2: bank: bit to the left of the bank bits selects the memctl
     * 3: superbank: bit to the left of the chip select selects the memctl
     *
     * NOTE: ba_intlv (rank interleaving) is independent of memory
     * controller interleaving; it is only within a memory controller.
     * Must use superbank interleaving if rank interleaving is used and
     * memory controller interleaving is enabled.
     */

    /*
     * 0 = no
     * 0x40 = CS0,CS1
     * 0x20 = CS2,CS3
     * 0x60 = CS0,CS1 + CS2,CS3
     * 0x04 = CS0,CS1,CS2,CS3
     */
    popts->ba_intlv_ctl = 0;

    /* Memory Organization Parameters */
    popts->registered_dimm_en = common_dimm->all_dimms_registered;

    /* Operational Mode Parameters */

    /* Pick ECC modes */
    popts->ecc_mode = 0;    /* 0 = disabled, 1 = enabled */
#ifdef CONFIG_DDR_ECC
    if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
        if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
            popts->ecc_mode = 1;
    } else
        popts->ecc_mode = 1;
#endif
    /* 1 = use memory controller to init data */
    popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;

    /*
     * Choose DQS config
     * 0 for DDR1
     * 1 for DDR2
     */
#if defined(CONFIG_SYS_FSL_DDR1)
    popts->dqs_config = 0;
#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
    popts->dqs_config = 1;
#endif

    /* Choose self-refresh during sleep. */
    popts->self_refresh_in_sleep = 1;

    /* Choose dynamic power management mode. */
    popts->dynamic_power = 0;

    /*
     * check first dimm for primary sdram width
     * presuming all dimms are similar
     * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
     */
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
    if (pdimm[0].n_ranks != 0) {
        if ((pdimm[0].data_width >= 64) && \
            (pdimm[0].data_width <= 72))
            popts->data_bus_width = 0;
        else if ((pdimm[0].data_width >= 32) && \
                 (pdimm[0].data_width <= 40))
            popts->data_bus_width = 1;
        else {
            panic("Error: data width %u is invalid!\n",
                  pdimm[0].data_width);
        }
    }
#else
    if (pdimm[0].n_ranks != 0) {
        if (pdimm[0].primary_sdram_width == 64)
            popts->data_bus_width = 0;
        else if (pdimm[0].primary_sdram_width == 32)
            popts->data_bus_width = 1;
        else if (pdimm[0].primary_sdram_width == 16)
            popts->data_bus_width = 2;
        else {
            panic("Error: primary sdram width %u is invalid!\n",
                  pdimm[0].primary_sdram_width);
        }
    }
#endif
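
    /* Enable x4 device support when the first DIMM uses x4 DRAM devices */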
    popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;

    /* Choose burst length. */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
#if defined(CONFIG_E500MC)
    popts->otf_burst_chop_en = 0;   /* on-the-fly burst chop disable */
    popts->burst_length = DDR_BL8;  /* Fixed 8-beat burst len */
#else
    if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
        /* 32-bit or 16-bit bus */
        popts->otf_burst_chop_en = 0;
        popts->burst_length = DDR_BL8;
    } else {
        popts->otf_burst_chop_en = 1;   /* on-the-fly burst chop */
        popts->burst_length = DDR_OTF;  /* on-the-fly BC4 and BL8 */
    }
#endif
#else
    popts->burst_length = DDR_BL4;  /* has to be 4 for DDR2 */
#endif

    /* Choose ddr controller address mirror mode */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
    for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
        if (pdimm[i].n_ranks) {
            popts->mirrored_dimm = pdimm[i].mirrored_dimm;
            break;
        }
    }
#endif

    /* Global Timing Parameters. */
    debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));

    /* Pick a caslat override. */
    popts->cas_latency_override = 0;
    popts->cas_latency_override_value = 3;
    if (popts->cas_latency_override) {
        debug("using caslat override value = %u\n",
              popts->cas_latency_override_value);
    }

    /* Decide whether to use the computed derated latency */
    popts->use_derated_caslat = 0;

    /* Choose an additive latency. */
    popts->additive_latency_override = 0;
    popts->additive_latency_override_value = 3;
    if (popts->additive_latency_override) {
        debug("using additive latency override value = %u\n",
              popts->additive_latency_override_value);
    }

    /*
     * 2T_EN setting
     *
     * Factors to consider for 2T_EN:
     * - number of DIMMs installed
     * - number of components, number of active ranks
     * - how much time you want to spend playing around
     */
    popts->twot_en = 0;
    popts->threet_en = 0;

    /* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
    if (popts->registered_dimm_en)
        popts->ap_en = 1; /* 0 = disable, 1 = enable */
    else
        popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */

    if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
        if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
            if (popts->registered_dimm_en ||
                (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
                popts->ap_en = 1;
        }
    }

    /*
     * BSTTOPRE precharge interval
     *
     * Set this to 0 for global auto precharge.
     * The value 0x100 has been used for DDR1, DDR2 and DDR3, and it is not
     * wrong; almost any value should work, and the best choice depends on
     * the application. There is no single good value for all, but a
     * reasonable approach is to use 1/4 of the refresh interval (refint).
     */
    popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
                     >> 2;
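
    /*
     * Worked example with illustrative numbers (not taken from this file):
     * with a 7.8 us refresh interval (tREFI) and a 1333 MT/s data rate
     * (mclk period of roughly 1.5 ns), picos_to_mclk() returns about
     * 5200 clocks, so bstopre ends up around 1300.
     */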

    /*
     * Window for four activates -- tFAW
     *
     * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
     * FIXME: varies depending upon number of column addresses or data
     * FIXME: width, was considering looking at pdimm->primary_sdram_width
     */
#if defined(CONFIG_SYS_FSL_DDR1)
    popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
#elif defined(CONFIG_SYS_FSL_DDR2)
    /*
     * x4/x8; some datasheets have 35000
     * x16 wide columns only? Use 50000?
     */
    popts->tfaw_window_four_activates_ps = 37500;
#else
    popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
#endif
    popts->zq_en = 0;
    popts->wrlvl_en = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
    /*
     * DDR3/DDR4 DIMMs use a fly-by topology, so enable write leveling
     * to meet tDQSS under different loading.
     */
    popts->wrlvl_en = 1;
    popts->zq_en = 1;
    popts->wrlvl_override = 0;
#endif

    /*
     * Check the interleaving configuration from the environment.
     * Please refer to doc/README.fsl-ddr for the details.
     *
     * If memory controller interleaving is enabled, then the data
     * bus widths must be programmed identically for all memory controllers.
     *
     * Attempt to set all controllers to the same chip select
     * interleaving mode. It will do a best effort to get the
     * requested ranks interleaved together such that the result
     * should be a subset of the requested configuration.
     *
     * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
     * is forced regardless of the hwconfig setting.
     */
#if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
    if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
        ;
#else
        goto done;
#endif
    if (pdimm[0].n_ranks == 0) {
        printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
        popts->memctl_interleaving = 0;
        goto done;
    }
    popts->memctl_interleaving = 1;
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
    popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
    popts->memctl_interleaving = 1;
    debug("256 Byte interleaving\n");
#else
    /*
     * test null first. if CONFIG_HWCONFIG is not defined
     * hwconfig_arg_cmp returns non-zero
     */
    if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
                              "null", buf)) {
        popts->memctl_interleaving = 0;
        debug("memory controller interleaving disabled.\n");
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "cacheline", buf)) {
        popts->memctl_interleaving_mode =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
        popts->memctl_interleaving =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : 1;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "page", buf)) {
        popts->memctl_interleaving_mode =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : FSL_DDR_PAGE_INTERLEAVING;
        popts->memctl_interleaving =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : 1;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "bank", buf)) {
        popts->memctl_interleaving_mode =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : FSL_DDR_BANK_INTERLEAVING;
        popts->memctl_interleaving =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : 1;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "superbank", buf)) {
        popts->memctl_interleaving_mode =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : FSL_DDR_SUPERBANK_INTERLEAVING;
        popts->memctl_interleaving =
            ((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
            0 : 1;
#if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "3way_1KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_3WAY_1KB_INTERLEAVING;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "3way_4KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_3WAY_4KB_INTERLEAVING;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "3way_8KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_3WAY_8KB_INTERLEAVING;
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "4way_1KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_4WAY_1KB_INTERLEAVING;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "4way_4KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_4WAY_4KB_INTERLEAVING;
    } else if (hwconfig_subarg_cmp_f("fsl_ddr",
                                     "ctlr_intlv",
                                     "4way_8KB", buf)) {
        popts->memctl_interleaving_mode =
            FSL_DDR_4WAY_8KB_INTERLEAVING;
#endif
    } else {
        popts->memctl_interleaving = 0;
        printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
    }
#endif /* CONFIG_SYS_FSL_DDR_INTLV_256B */
done:
#endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
    if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
        (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
        /* test null first. if CONFIG_HWCONFIG is not defined,
         * hwconfig_subarg_cmp_f returns non-zero */
        if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                  "null", buf))
            debug("bank interleaving disabled.\n");
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                       "cs0_cs1", buf))
            popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                       "cs2_cs3", buf))
            popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                       "cs0_cs1_and_cs2_cs3", buf))
            popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                       "cs0_cs1_cs2_cs3", buf))
            popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
                                       "auto", buf))
            popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
        else
            printf("hwconfig has unrecognized parameter for bank_intlv.\n");
        switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
        case FSL_DDR_CS0_CS1_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
            if (pdimm[0].n_ranks < 4) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(chip-select) for "
                       "CS0+CS1+CS2+CS3 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
            if (pdimm[0].n_ranks == 4)
                break;
#endif
            if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(chip-select) for "
                       "CS0+CS1+CS2+CS3 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
            if (pdimm[0].capacity != pdimm[1].capacity) {
                popts->ba_intlv_ctl = 0;
                printf("Not identical DIMM size for "
                       "CS0+CS1+CS2+CS3 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
#endif
            break;
        case FSL_DDR_CS0_CS1:
            if (pdimm[0].n_ranks < 2) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(chip-select) for "
                       "CS0+CS1 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
            break;
        case FSL_DDR_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
            if (pdimm[0].n_ranks < 4) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(chip-select) for CS2+CS3 "
                       "on controller %d, interleaving disabled!\n", ctrl_num);
            }
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
            if (pdimm[1].n_ranks < 2) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(chip-select) for CS2+CS3 "
                       "on controller %d, interleaving disabled!\n", ctrl_num);
            }
#endif
            break;
        case FSL_DDR_CS0_CS1_AND_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
            if (pdimm[0].n_ranks < 4) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(CS) for CS0+CS1 and "
                       "CS2+CS3 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
            if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
                popts->ba_intlv_ctl = 0;
                printf("Not enough bank(CS) for CS0+CS1 and "
                       "CS2+CS3 on controller %d, "
                       "interleaving disabled!\n", ctrl_num);
            }
#endif
            break;
        default:
            popts->ba_intlv_ctl = 0;
            break;
        }
    }

    if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
        if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
            popts->addr_hash = 0;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
                                       "true", buf))
            popts->addr_hash = 1;
    }

    if (pdimm[0].n_ranks == 4)
        popts->quad_rank_present = 1;

    popts->package_3ds = pdimm->package_3ds;

    ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
    if (popts->registered_dimm_en) {
        popts->rcw_override = 1;
        popts->rcw_1 = 0x000a5a00;
        if (ddr_freq <= 800)
            popts->rcw_2 = 0x00000000;
        else if (ddr_freq <= 1066)
            popts->rcw_2 = 0x00100000;
        else if (ddr_freq <= 1333)
            popts->rcw_2 = 0x00200000;
        else
            popts->rcw_2 = 0x00300000;
    }
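
    /*
     * Note: rcw_1/rcw_2 above supply override values for the RDIMM register
     * control words; the rcw_2 value picked here steps up with the DDR data
     * rate. The exact encodings are controller and board specific -- treat
     * these as board defaults rather than universal values.
     */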

    fsl_ddr_board_options(popts, pdimm, ctrl_num);

    return 0;
}

void check_interleaving_options(fsl_ddr_info_t *pinfo)
{
    int i, j, k, check_n_ranks, intlv_invalid = 0;
    unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
    unsigned long long check_rank_density;
    struct dimm_params_s *dimm;
    int first_ctrl = pinfo->first_ctrl;
    int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;

    /*
     * Check if all controllers are configured for memory
     * controller interleaving. Identical dimms are recommended. At least
     * the size, row and col address should be checked.
     */
    j = 0;
    check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
    check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
    check_n_row_addr = pinfo->dimm_params[first_ctrl][0].n_row_addr;
    check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
    check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
    for (i = first_ctrl; i <= last_ctrl; i++) {
        dimm = &pinfo->dimm_params[i][0];
        if (!pinfo->memctl_opts[i].memctl_interleaving) {
            continue;
        } else if (((check_rank_density != dimm->rank_density) ||
                    (check_n_ranks != dimm->n_ranks) ||
                    (check_n_row_addr != dimm->n_row_addr) ||
                    (check_n_col_addr != dimm->n_col_addr) ||
                    (check_intlv !=
                     pinfo->memctl_opts[i].memctl_interleaving_mode))) {
            intlv_invalid = 1;
            break;
        } else {
            j++;
        }
    }
    if (intlv_invalid) {
        for (i = first_ctrl; i <= last_ctrl; i++)
            pinfo->memctl_opts[i].memctl_interleaving = 0;
        printf("Not all DIMMs are identical. "
               "Memory controller interleaving disabled.\n");
    } else {
        switch (check_intlv) {
        case FSL_DDR_256B_INTERLEAVING:
        case FSL_DDR_CACHE_LINE_INTERLEAVING:
        case FSL_DDR_PAGE_INTERLEAVING:
        case FSL_DDR_BANK_INTERLEAVING:
        case FSL_DDR_SUPERBANK_INTERLEAVING:
#if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
            k = 2;
#else
            k = CONFIG_SYS_NUM_DDR_CTLRS;
#endif
            break;
        case FSL_DDR_3WAY_1KB_INTERLEAVING:
        case FSL_DDR_3WAY_4KB_INTERLEAVING:
        case FSL_DDR_3WAY_8KB_INTERLEAVING:
        case FSL_DDR_4WAY_1KB_INTERLEAVING:
        case FSL_DDR_4WAY_4KB_INTERLEAVING:
        case FSL_DDR_4WAY_8KB_INTERLEAVING:
        default:
            k = CONFIG_SYS_NUM_DDR_CTLRS;
            break;
        }
        debug("%d of %d controllers are interleaving.\n", j, k);
        if (j && (j != k)) {
            for (i = first_ctrl; i <= last_ctrl; i++)
                pinfo->memctl_opts[i].memctl_interleaving = 0;
            if ((last_ctrl - first_ctrl) > 1)
                puts("Not all controllers have compatible interleaving mode. All disabled.\n");
        }
    }
    debug("Checking interleaving options completed\n");
}

int fsl_use_spd(void)
{
    int use_spd = 0;

#ifdef CONFIG_DDR_SPD
    char buffer[HWCONFIG_BUFFER_SIZE];
    char *buf = NULL;

    /*
     * Extract hwconfig from the environment, since we have not properly set
     * up the environment yet but need it for the DDR config parameters.
     */
    if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
        buf = buffer;

    /* if hwconfig is not enabled, or "sdram" is not defined, use spd */
    if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
        if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
            use_spd = 1;
        else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
                                       "fixed", buf))
            use_spd = 0;
        else
            use_spd = 1;
    } else
        use_spd = 1;
#endif

    return use_spd;
}