/*
 * Copyright 2008, 2010-2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <hwconfig.h>
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>

#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3)
#include <asm/arch/clock.h>
#endif

/*
 * Use our own stack-based buffer before relocation to allow accessing longer
 * hwconfig strings that might be in the environment before we've relocated.
 * This is fairly fragile, both in its use of stack space and in whether the
 * buffer is big enough. However, getenv_f() will warn us about the latter.
 */

/* Board-specific functions defined in each board's ddr.c */
extern void fsl_ddr_board_options(memctl_options_t *popts,
				  dimm_params_t *pdimm,
				  unsigned int ctrl_num);

struct dynamic_odt {
	unsigned int odt_rd_cfg;
	unsigned int odt_wr_cfg;
	unsigned int odt_rtt_norm;
	unsigned int odt_rtt_wr;
};
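
/*
 * The dynamic ODT tables below are indexed by chip select. The names encode
 * the DIMM population they apply to: "single_"/"dual_" gives the number of
 * slots per controller, and Q/D/S/0 stand for quad-rank, dual-rank,
 * single-rank and empty slot respectively (e.g. dual_DS = two slots,
 * dual-rank DIMM in slot 0 and single-rank DIMM in slot 1).
 * populate_memctl_options() selects one of these tables from the detected
 * rank counts.
 */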
#ifdef CONFIG_SYS_FSL_DDR4
/*
 * Quad-rank is not verified yet due to lack of availability.
 * 20 ohm is replaced with 34 ohm since DDR4 does not have a 20 ohm option.
 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,	/* unverified */
		DDR4_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	}
};
#elif defined(CONFIG_SYS_FSL_DDR3)
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	}
};
#else	/* CONFIG_SYS_FSL_DDR3 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
#endif

/*
 * Automatically select bank interleaving mode based on the DIMMs present,
 * in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 */
static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
{
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
	else if (pdimm[0].n_ranks == 2)
		return FSL_DDR_CS0_CS1;
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
#endif
	if (pdimm[0].n_ranks == 2) {
		if (pdimm[1].n_ranks == 2)
			return FSL_DDR_CS0_CS1_CS2_CS3;
		else
			return FSL_DDR_CS0_CS1;
	}
#endif
	return 0;
}
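
/*
 * populate_memctl_options() fills in popts with default controller settings
 * derived from the DIMM parameters and from any "fsl_ddr" hwconfig options
 * found in the environment, then calls fsl_ddr_board_options() so the board
 * code can override the result.
 */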
unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
			memctl_options_t *popts,
			dimm_params_t *pdimm,
			unsigned int ctrl_num)
{
	unsigned int i;
	char buffer[HWCONFIG_BUFFER_SIZE];
	char *buf = NULL;
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	const struct dynamic_odt *pdodt = odt_unknown;
#endif
	ulong ddr_freq;

	/*
	 * Extract hwconfig from the environment since we have not properly
	 * set up the environment yet, but need it for the DDR config
	 * parameters.
	 */
	if (getenv_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	/* Chip select options. */
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	switch (pdimm[0].n_ranks) {
	case 1:
		pdodt = single_S;
		break;
	case 2:
		pdodt = single_D;
		break;
	case 4:
		pdodt = single_Q;
		break;
	}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
	switch (pdimm[0].n_ranks) {
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	case 4:
		pdodt = single_Q;
		if (pdimm[1].n_ranks)
			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
		break;
#endif
	case 2:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_DD;
			break;
		case 1:
			pdodt = dual_DS;
			break;
		case 0:
			pdodt = dual_D0;
			break;
		}
		break;
	case 1:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_SD;
			break;
		case 1:
			pdodt = dual_SS;
			break;
		case 0:
			pdodt = dual_S0;
			break;
		}
		break;
	case 0:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_0D;
			break;
		case 1:
			pdodt = dual_0S;
			break;
		}
		break;
	}
#endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
#endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */

	/* Pick chip-select local options. */
	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
#else
		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
#endif
		popts->cs_local_opts[i].auto_precharge = 0;
	}

	/* Pick interleaving mode. */

	/*
	 * 0 = no interleaving
	 * 1 = interleaving between 2 controllers
	 */
	popts->memctl_interleaving = 0;
	/*
	 * 0 = cacheline
	 * 1 = page
	 * 2 = (logical) bank
	 * 3 = superbank (only if CS interleaving is enabled)
	 */
	popts->memctl_interleaving_mode = 0;
	/*
	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
	 * 1: page: bit to the left of the column bits selects the memctl
	 * 2: bank: bit to the left of the bank bits selects the memctl
	 * 3: superbank: bit to the left of the chip select selects the memctl
	 *
	 * NOTE: ba_intlv (rank interleaving) is independent of memory
	 * controller interleaving; it is only within a memory controller.
	 * Must use superbank interleaving if rank interleaving is used and
	 * memory controller interleaving is enabled.
	 */
	/*
	 * 0 = no
	 * 0x40 = CS0,CS1
	 * 0x20 = CS2,CS3
	 * 0x60 = CS0,CS1 + CS2,CS3
	 * 0x04 = CS0,CS1,CS2,CS3
	 */
	popts->ba_intlv_ctl = 0;

	/* Memory Organization Parameters */
	popts->registered_dimm_en = common_dimm->all_dimms_registered;

	/* Operational Mode Parameters */

	/* Pick ECC modes */
	popts->ecc_mode = 0;	/* 0 = disabled, 1 = enabled */
#ifdef CONFIG_DDR_ECC
	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
			popts->ecc_mode = 1;
	} else
		popts->ecc_mode = 1;
#endif
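	/*
	 * For example, "hwconfig=fsl_ddr:ecc=off" leaves ECC disabled even
	 * when CONFIG_DDR_ECC is set; omitting the option (or "ecc=on")
	 * enables it.
	 */
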
	/* 1 = use memory controller to init data */
	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;

	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->dqs_config = 0;
#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
	popts->dqs_config = 1;
#endif

	/* Choose self-refresh during sleep. */
	popts->self_refresh_in_sleep = 1;

	/* Choose dynamic power management mode. */
	popts->dynamic_power = 0;

	/*
	 * check first dimm for primary sdram width
	 * presuming all dimms are similar
	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	if (pdimm[0].n_ranks != 0) {
		if ((pdimm[0].data_width >= 64) && \
			(pdimm[0].data_width <= 72))
			popts->data_bus_width = 0;
		else if ((pdimm[0].data_width >= 32) && \
			(pdimm[0].data_width <= 40))
			popts->data_bus_width = 1;
		else {
			panic("Error: data width %u is invalid!\n",
				pdimm[0].data_width);
		}
	}
#else
	if (pdimm[0].n_ranks != 0) {
		if (pdimm[0].primary_sdram_width == 64)
			popts->data_bus_width = 0;
		else if (pdimm[0].primary_sdram_width == 32)
			popts->data_bus_width = 1;
		else if (pdimm[0].primary_sdram_width == 16)
			popts->data_bus_width = 2;
		else {
			panic("Error: primary sdram width %u is invalid!\n",
				pdimm[0].primary_sdram_width);
		}
	}
#endif

	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;

	/* Choose burst length. */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
#if defined(CONFIG_E500MC)
	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
#else
	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
		/* 32-bit or 16-bit bus */
		popts->otf_burst_chop_en = 0;
		popts->burst_length = DDR_BL8;
	} else {
		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
	}
#endif
#else
	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
#endif

	/* Choose ddr controller address mirror mode */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
		if (pdimm[i].n_ranks) {
			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
			break;
		}
	}
#endif

	/* Global Timing Parameters. */
	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));

	/* Pick a caslat override. */
	popts->cas_latency_override = 0;
	popts->cas_latency_override_value = 3;
	if (popts->cas_latency_override) {
		debug("using caslat override value = %u\n",
			popts->cas_latency_override_value);
	}

	/* Decide whether to use the computed derated latency */
	popts->use_derated_caslat = 0;

	/* Choose an additive latency. */
	popts->additive_latency_override = 0;
	popts->additive_latency_override_value = 3;
	if (popts->additive_latency_override) {
		debug("using additive latency override value = %u\n",
			popts->additive_latency_override_value);
	}

	/*
	 * 2T_EN setting
	 *
	 * Factors to consider for 2T_EN:
	 *	- number of DIMMs installed
	 *	- number of components, number of active ranks
	 *	- how much time you want to spend playing around
	 */
	popts->twot_en = 0;
	popts->threet_en = 0;

	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
	if (popts->registered_dimm_en)
		popts->ap_en = 1; /* 0 = disable, 1 = enable */
	else
		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */

	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
			if (popts->registered_dimm_en ||
			    (CONFIG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
				popts->ap_en = 1;
		}
	}

	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge.
	 * A value of 0x100 has been used for DDR1, DDR2 and DDR3. That is not
	 * wrong; almost any value works, and the best choice depends on the
	 * application. There is no single value that is good for all cases.
	 * One reasonable heuristic, used here, is 1/4 of the refresh interval.
	 */
	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
			 >> 2;
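	/*
	 * Worked example (illustrative numbers): with a 7.8 us refresh
	 * interval (refresh_rate_ps = 7800000) and a 1.5 ns memory clock
	 * period (DDR3-1333), picos_to_mclk() yields 5200 clocks, so
	 * bstopre = 5200 >> 2 = 1300 clocks.
	 */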

	/*
	 * Window for four activates -- tFAW
	 *
	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
	 * FIXME: varies depending upon number of column addresses or data
	 *	  width, was considering looking at pdimm->primary_sdram_width
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
#elif defined(CONFIG_SYS_FSL_DDR2)
	/*
	 * x4/x8; some datasheets have 35000
	 * x16 wide columns only? Use 50000?
	 */
	popts->tfaw_window_four_activates_ps = 37500;
#else
	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
#endif
	popts->zq_en = 0;
	popts->wrlvl_en = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/*
	 * Because DDR3/DDR4 DIMMs use a fly-by topology, write leveling is
	 * recommended so that tDQSS can be met under different loading.
	 */
	popts->wrlvl_en = 1;
	popts->zq_en = 1;
	popts->wrlvl_override = 0;
#endif

	/*
	 * Check the interleaving configuration from the environment.
	 * Please refer to doc/README.fsl-ddr for details.
	 *
	 * If memory controller interleaving is enabled, then the data bus
	 * widths must be programmed identically for all memory controllers.
	 *
	 * Attempt to set all controllers to the same chip select interleaving
	 * mode. It will do a best effort to get the requested ranks
	 * interleaved together such that the result should be a subset of the
	 * requested configuration.
	 *
	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
	 * is mandatory and always enabled.
	 */
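	/*
	 * For example, something like
	 *	setenv hwconfig "fsl_ddr:ctlr_intlv=cacheline,bank_intlv=auto"
	 * selects cacheline interleaving between controllers and automatic
	 * chip-select interleaving (see doc/README.fsl-ddr for the full list
	 * of accepted values).
	 */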
#if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
		;
#else
		goto done;
#endif
	if (pdimm[0].n_ranks == 0) {
		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
		popts->memctl_interleaving = 0;
		goto done;
	}
	popts->memctl_interleaving = 1;
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
	popts->memctl_interleaving = 1;
	debug("256 Byte interleaving\n");
#else
	/*
	 * Test for "null" first. If CONFIG_HWCONFIG is not defined,
	 * hwconfig_subarg_cmp_f() returns non-zero.
	 */
	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
				  "null", buf)) {
		popts->memctl_interleaving = 0;
		debug("memory controller interleaving disabled.\n");
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "cacheline", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "page", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_PAGE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "bank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_BANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "superbank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
#if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_8KB_INTERLEAVING;
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_8KB_INTERLEAVING;
#endif
	} else {
		popts->memctl_interleaving = 0;
		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
	}
#endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
done:
#endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */

	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
	    (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/*
		 * Test null first. If CONFIG_HWCONFIG is not defined,
		 * hwconfig_subarg_cmp_f returns non-zero.
		 */
		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					  "null", buf))
			debug("bank interleaving disabled.\n");
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_and_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "auto", buf))
			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
		else
			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
			if (pdimm[0].n_ranks == 4)
				break;
#endif
			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			if (pdimm[0].capacity != pdimm[1].capacity) {
				popts->ba_intlv_ctl = 0;
				printf("Not identical DIMM size for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1:
			if (pdimm[0].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			break;
		case FSL_DDR_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if (pdimm[1].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		default:
			popts->ba_intlv_ctl = 0;
			break;
		}
	}

	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
			popts->addr_hash = 0;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
					       "true", buf))
			popts->addr_hash = 1;
	}

	if (pdimm[0].n_ranks == 4)
		popts->quad_rank_present = 1;

	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
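
	/*
	 * For registered DIMMs, override the register control words (RCWs)
	 * of the DIMM's register chip; rcw_1/rcw_2 carry RC0..RC7 and
	 * RC8..RC15. The rcw_2 value is stepped up with the data rate
	 * (presumably the operating-speed word, RC10).
	 */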
	if (popts->registered_dimm_en) {
		popts->rcw_override = 1;
		popts->rcw_1 = 0x000a5a00;
		if (ddr_freq <= 800)
			popts->rcw_2 = 0x00000000;
		else if (ddr_freq <= 1066)
			popts->rcw_2 = 0x00100000;
		else if (ddr_freq <= 1333)
			popts->rcw_2 = 0x00200000;
		else
			popts->rcw_2 = 0x00300000;
	}

	fsl_ddr_board_options(popts, pdimm, ctrl_num);

	return 0;
}
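
/*
 * Verify that the interleaving options chosen above are consistent across
 * controllers: if any controller that requested memory controller
 * interleaving has DIMMs or an interleaving mode that differ from the
 * others, interleaving is disabled on all of them.
 */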
void check_interleaving_options(fsl_ddr_info_t *pinfo)
{
	int i, j, k, check_n_ranks, intlv_invalid = 0;
	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
	unsigned long long check_rank_density;
	struct dimm_params_s *dimm;
	int first_ctrl = pinfo->first_ctrl;
	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;

	/*
	 * Check if all controllers are configured for memory controller
	 * interleaving. Identical DIMMs are recommended. At a minimum, the
	 * size, row and column addresses should be checked.
	 */
	j = 0;
	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
	check_n_row_addr = pinfo->dimm_params[first_ctrl][0].n_row_addr;
	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
	for (i = first_ctrl; i <= last_ctrl; i++) {
		dimm = &pinfo->dimm_params[i][0];
		if (!pinfo->memctl_opts[i].memctl_interleaving) {
			continue;
		} else if (((check_rank_density != dimm->rank_density) ||
			    (check_n_ranks != dimm->n_ranks) ||
			    (check_n_row_addr != dimm->n_row_addr) ||
			    (check_n_col_addr != dimm->n_col_addr) ||
			    (check_intlv !=
			     pinfo->memctl_opts[i].memctl_interleaving_mode))) {
			intlv_invalid = 1;
			break;
		} else {
			j++;
		}
	}
	if (intlv_invalid) {
		for (i = first_ctrl; i <= last_ctrl; i++)
			pinfo->memctl_opts[i].memctl_interleaving = 0;
		printf("Not all DIMMs are identical. "
			"Memory controller interleaving disabled.\n");
	} else {
		switch (check_intlv) {
		case FSL_DDR_256B_INTERLEAVING:
		case FSL_DDR_CACHE_LINE_INTERLEAVING:
		case FSL_DDR_PAGE_INTERLEAVING:
		case FSL_DDR_BANK_INTERLEAVING:
		case FSL_DDR_SUPERBANK_INTERLEAVING:
#if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
			k = 2;
#else
			k = CONFIG_SYS_NUM_DDR_CTLRS;
#endif
			break;
		case FSL_DDR_3WAY_1KB_INTERLEAVING:
		case FSL_DDR_3WAY_4KB_INTERLEAVING:
		case FSL_DDR_3WAY_8KB_INTERLEAVING:
		case FSL_DDR_4WAY_1KB_INTERLEAVING:
		case FSL_DDR_4WAY_4KB_INTERLEAVING:
		case FSL_DDR_4WAY_8KB_INTERLEAVING:
		default:
			k = CONFIG_SYS_NUM_DDR_CTLRS;
			break;
		}
		debug("%d of %d controllers are interleaving.\n", j, k);
		if (j && (j != k)) {
			for (i = first_ctrl; i <= last_ctrl; i++)
				pinfo->memctl_opts[i].memctl_interleaving = 0;
			if ((last_ctrl - first_ctrl) > 1)
				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
		}
	}
	debug("Checking interleaving options completed\n");
}

int fsl_use_spd(void)
{
	int use_spd = 0;

#ifdef CONFIG_DDR_SPD
	char buffer[HWCONFIG_BUFFER_SIZE];
	char *buf = NULL;

	/*
	 * Extract hwconfig from the environment since we have not properly
	 * set up the environment yet, but need it for the DDR config
	 * parameters.
	 */
	if (getenv_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	/* If hwconfig is not enabled, or "sdram" is not defined, use SPD. */
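	/*
	 * For example, "hwconfig=fsl_ddr:sdram=fixed" selects the board's
	 * fixed (compile-time) DDR configuration instead of reading SPD.
	 */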
	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
			use_spd = 1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
					       "fixed", buf))
			use_spd = 0;
		else
			use_spd = 1;
	} else
		use_spd = 1;
#endif

	return use_spd;
}