mv_ddr_plat.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) Marvell International Ltd. and its affiliates
  4. */
  5. #include "ddr3_init.h"
  6. #include "mv_ddr_sys_env_lib.h"
  7. #define DDR_INTERFACES_NUM 1
  8. #define DDR_INTERFACE_OCTETS_NUM 5
  9. /*
  10. * 1. The L2 filter should be set in the binary header to 0xD000000,
  11. * to avoid conflict with internal register IO.
  12. * 2. U-Boot modifies the internal registers base to 0xf100000,
  13. * and should then update the L2 filter accordingly to 0xf000000 (3.75 GB)
  14. */
  15. #define L2_FILTER_FOR_MAX_MEMORY_SIZE 0xC0000000 /* temporarily limit the L2 filter to 3 GB (LSP issue) */
  16. #define ADDRESS_FILTERING_END_REGISTER 0x8c04
  17. #define DYNAMIC_CS_SIZE_CONFIG
  18. #define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
  19. /* Thermal Sensor Registers */
  20. #define TSEN_CONTROL_LSB_REG 0xE4070
  21. #define TSEN_CONTROL_LSB_TC_TRIM_OFFSET 0
  22. #define TSEN_CONTROL_LSB_TC_TRIM_MASK (0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
  23. #define TSEN_CONTROL_MSB_REG 0xE4074
  24. #define TSEN_CONTROL_MSB_RST_OFFSET 8
  25. #define TSEN_CONTROL_MSB_RST_MASK (0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
  26. #define TSEN_STATUS_REG 0xe4078
  27. #define TSEN_STATUS_READOUT_VALID_OFFSET 10
  28. #define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
  29. TSEN_STATUS_READOUT_VALID_OFFSET)
  30. #define TSEN_STATUS_TEMP_OUT_OFFSET 0
  31. #define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
  32. static struct dlb_config ddr3_dlb_config_table[] = {
  33. {DLB_CTRL_REG, 0x2000005c},
  34. {DLB_BUS_OPT_WT_REG, 0x00880000},
  35. {DLB_AGING_REG, 0x0f7f007f},
  36. {DLB_EVICTION_CTRL_REG, 0x0000129f},
  37. {DLB_EVICTION_TIMERS_REG, 0x00ff0000},
  38. {DLB_WTS_DIFF_CS_REG, 0x04030802},
  39. {DLB_WTS_DIFF_BG_REG, 0x00000a02},
  40. {DLB_WTS_SAME_BG_REG, 0x09000a01},
  41. {DLB_WTS_CMDS_REG, 0x00020005},
  42. {DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
  43. {DLB_QUEUE_MAP_REG, 0x00000543},
  44. {DLB_SPLIT_REG, 0x00000000},
  45. {DLB_USER_CMD_REG, 0x00000000},
  46. {0x0, 0x0}
  47. };
  48. static struct dlb_config *sys_env_dlb_config_ptr_get(void)
  49. {
  50. return &ddr3_dlb_config_table[0];
  51. }
  52. static u8 a38x_bw_per_freq[DDR_FREQ_LAST] = {
  53. 0x3, /* DDR_FREQ_100 */
  54. 0x4, /* DDR_FREQ_400 */
  55. 0x4, /* DDR_FREQ_533 */
  56. 0x5, /* DDR_FREQ_667 */
  57. 0x5, /* DDR_FREQ_800 */
  58. 0x5, /* DDR_FREQ_933 */
  59. 0x5, /* DDR_FREQ_1066 */
  60. 0x3, /* DDR_FREQ_311 */
  61. 0x3, /* DDR_FREQ_333 */
  62. 0x4, /* DDR_FREQ_467 */
  63. 0x5, /* DDR_FREQ_850 */
  64. 0x5, /* DDR_FREQ_600 */
  65. 0x3, /* DDR_FREQ_300 */
  66. 0x5, /* DDR_FREQ_900 */
  67. 0x3, /* DDR_FREQ_360 */
  68. 0x5 /* DDR_FREQ_1000 */
  69. };
  70. static u8 a38x_rate_per_freq[DDR_FREQ_LAST] = {
  71. 0x1, /* DDR_FREQ_100 */
  72. 0x2, /* DDR_FREQ_400 */
  73. 0x2, /* DDR_FREQ_533 */
  74. 0x2, /* DDR_FREQ_667 */
  75. 0x2, /* DDR_FREQ_800 */
  76. 0x3, /* DDR_FREQ_933 */
  77. 0x3, /* DDR_FREQ_1066 */
  78. 0x1, /* DDR_FREQ_311 */
  79. 0x1, /* DDR_FREQ_333 */
  80. 0x2, /* DDR_FREQ_467 */
  81. 0x2, /* DDR_FREQ_850 */
  82. 0x2, /* DDR_FREQ_600 */
  83. 0x1, /* DDR_FREQ_300 */
  84. 0x2, /* DDR_FREQ_900 */
  85. 0x1, /* DDR_FREQ_360 */
  86. 0x2 /* DDR_FREQ_1000 */
  87. };
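/*
 * VCO frequency per DDR clock sample-at-reset field value, one table per
 * reference clock (25 MHz and 40 MHz). Values are assumed to be in MHz,
 * matching freq_val[]; ddr3_tip_a38x_set_divider() divides the table entry
 * by the target DDR frequency to obtain the clock divider.
 */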
  88. static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
  89. 666, /* 0 */
  90. 1332,
  91. 800,
  92. 1600,
  93. 1066,
  94. 2132,
  95. 1200,
  96. 2400,
  97. 1332,
  98. 1332,
  99. 1500,
  100. 1500,
  101. 1600, /* 12 */
  102. 1600,
  103. 1700,
  104. 1700,
  105. 1866,
  106. 1866,
  107. 1800, /* 18 */
  108. 2000,
  109. 2000,
  110. 4000,
  111. 2132,
  112. 2132,
  113. 2300,
  114. 2300,
  115. 2400,
  116. 2400,
  117. 2500,
  118. 2500,
  119. 800
  120. };
  121. static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
  122. 666, /* 0 */
  123. 1332,
  124. 800,
  125. 800, /* 0x3 */
  126. 1066,
  127. 1066, /* 0x5 */
  128. 1200,
  129. 2400,
  130. 1332,
  131. 1332,
  132. 1500, /* 10 */
  133. 1600, /* 0xB */
  134. 1600,
  135. 1600,
  136. 1700,
  137. 1560, /* 0xF */
  138. 1866,
  139. 1866,
  140. 1800,
  141. 2000,
  142. 2000, /* 20 */
  143. 4000,
  144. 2132,
  145. 2132,
  146. 2300,
  147. 2300,
  148. 2400,
  149. 2400,
  150. 2500,
  151. 2500,
  152. 1800 /* 30 - 0x1E */
  153. };
  154. static u32 async_mode_at_tf;
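/*
 * DQ bit to PHY pin mapping registered with the training IP via
 * ddr3_tip_register_dq_table(); apparently one row of eight entries per
 * data octet (see the row markers below).
 */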
  155. static u32 dq_bit_map_2_phy_pin[] = {
  156. 1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
  157. 8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
  158. 3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
  159. 1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
  160. 0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
  161. };
  162. void mv_ddr_mem_scrubbing(void)
  163. {
  164. }
  165. static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
  166. enum hws_ddr_freq freq);
  167. /*
  168. * Read temperature TJ value
  169. */
  170. static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
  171. {
  172. int reg = 0;
  173. /* Initiates TSEN hardware reset once */
  174. if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
  175. reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
  176. /* set Tsen Tc Trim to correct default value (errata #132698) */
  177. reg = reg_read(TSEN_CONTROL_LSB_REG);
  178. reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
  179. reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
  180. reg_write(TSEN_CONTROL_LSB_REG, reg);
  181. }
  182. mdelay(10);
  183. /* Check if the readout field is valid */
  184. if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
  185. printf("%s: TSEN not ready\n", __func__);
  186. return 0;
  187. }
  188. reg = reg_read(TSEN_STATUS_REG);
  189. reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
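/*
 * Convert the 10-bit readout to degrees Celsius:
 * T = raw * 10000 / 21445 - 272.674; the expression below does the math in
 * milli-degrees to keep integer precision.
 */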
  190. return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
  191. }
  192. /*
  193. * Name: ddr3_tip_a38x_get_freq_config.
  194. * Desc:
  195. * Args:
  196. * Notes:
  197. * Returns: MV_OK if success, other error code if fail.
  198. */
  199. static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
  200. struct hws_tip_freq_config_info
  201. *freq_config_info)
  202. {
  203. if (a38x_bw_per_freq[freq] == 0xff)
  204. return MV_NOT_SUPPORTED;
  205. if (freq_config_info == NULL)
  206. return MV_BAD_PARAM;
  207. freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
  208. freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
  209. freq_config_info->is_supported = 1;
  210. return MV_OK;
  211. }
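/*
 * Dunit register accessors: dunit_read() returns the register contents
 * masked by 'mask'; dunit_write() performs a read-modify-write of the
 * masked bits, or a plain write when mask == MASK_ALL_BITS.
 */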
  212. static void dunit_read(u32 addr, u32 mask, u32 *data)
  213. {
  214. *data = reg_read(addr) & mask;
  215. }
  216. static void dunit_write(u32 addr, u32 mask, u32 data)
  217. {
  218. u32 reg_val = data;
  219. if (mask != MASK_ALL_BITS) {
  220. dunit_read(addr, MASK_ALL_BITS, &reg_val);
  221. reg_val &= (~mask);
  222. reg_val |= (data & mask);
  223. }
  224. reg_write(addr, reg_val);
  225. }
  226. #define ODPG_ENABLE_REG 0x186d4
  227. #define ODPG_EN_OFFS 0
  228. #define ODPG_EN_MASK 0x1
  229. #define ODPG_EN_ENA 1
  230. #define ODPG_EN_DONE 0
  231. #define ODPG_DIS_OFFS 8
  232. #define ODPG_DIS_MASK 0x1
  233. #define ODPG_DIS_DIS 1
  234. void mv_ddr_odpg_enable(void)
  235. {
  236. dunit_write(ODPG_ENABLE_REG,
  237. ODPG_EN_MASK << ODPG_EN_OFFS,
  238. ODPG_EN_ENA << ODPG_EN_OFFS);
  239. }
  240. void mv_ddr_odpg_disable(void)
  241. {
  242. dunit_write(ODPG_ENABLE_REG,
  243. ODPG_DIS_MASK << ODPG_DIS_OFFS,
  244. ODPG_DIS_DIS << ODPG_DIS_OFFS);
  245. }
  246. void mv_ddr_odpg_done_clr(void)
  247. {
  248. return;
  249. }
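/*
 * Poll ODPG_ENABLE_REG up to 'count' times until the enable bit reads back
 * as ODPG_EN_DONE; returns MV_FAIL on timeout.
 */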
  250. int mv_ddr_is_odpg_done(u32 count)
  251. {
  252. u32 i, data;
  253. for (i = 0; i < count; i++) {
  254. dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
  255. if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
  256. ODPG_EN_DONE)
  257. break;
  258. }
  259. if (i >= count) {
  260. printf("%s: timeout\n", __func__);
  261. return MV_FAIL;
  262. }
  263. return MV_OK;
  264. }
  265. void mv_ddr_training_enable(void)
  266. {
  267. dunit_write(GLOB_CTRL_STATUS_REG,
  268. TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
  269. TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
  270. }
  271. #define DRAM_INIT_CTRL_STATUS_REG 0x18488
  272. #define TRAINING_TRIGGER_OFFS 0
  273. #define TRAINING_TRIGGER_MASK 0x1
  274. #define TRAINING_TRIGGER_ENA 1
  275. #define TRAINING_DONE_OFFS 1
  276. #define TRAINING_DONE_MASK 0x1
  277. #define TRAINING_DONE_DONE 1
  278. #define TRAINING_DONE_NOT_DONE 0
  279. #define TRAINING_RESULT_OFFS 2
  280. #define TRAINING_RESULT_MASK 0x1
  281. #define TRAINING_RESULT_PASS 0
  282. #define TRAINING_RESULT_FAIL 1
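/*
 * Poll DRAM_INIT_CTRL_STATUS_REG up to 'count' times for the training-done
 * bit and report the pass/fail bit through *result.
 */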
  283. int mv_ddr_is_training_done(u32 count, u32 *result)
  284. {
  285. u32 i, data;
  286. if (result == NULL) {
  287. printf("%s: NULL result pointer found\n", __func__);
  288. return MV_FAIL;
  289. }
  290. for (i = 0; i < count; i++) {
  291. dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
  292. if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
  293. TRAINING_DONE_DONE)
  294. break;
  295. }
  296. if (i >= count) {
  297. printf("%s: timeout\n", __func__);
  298. return MV_FAIL;
  299. }
  300. *result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;
  301. return MV_OK;
  302. }
  303. #define DM_PAD 10
  304. u32 mv_ddr_dm_pad_get(void)
  305. {
  306. return DM_PAD;
  307. }
  308. /*
  309. * Name: ddr3_tip_a38x_select_ddr_controller.
  310. * Desc: Enable/Disable access to Marvell's server.
  311. * Args: dev_num - device number
  312. * enable - whether to enable or disable the server
  313. * Notes:
  314. * Returns: MV_OK if success, other error code if fail.
  315. */
  316. static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
  317. {
  318. u32 reg;
  319. reg = reg_read(DUAL_DUNIT_CFG_REG);
  320. if (enable)
  321. reg |= (1 << 6);
  322. else
  323. reg &= ~(1 << 6);
  324. reg_write(DUAL_DUNIT_CFG_REG, reg);
  325. return MV_OK;
  326. }
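/*
 * Clock ratio between the dunit and the DDR interface: 1 (1:1 mode) for
 * DDR_FREQ_LOW_FREQ or frequencies of 400 MHz and below, otherwise 2
 * (2:1 mode).
 */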
  327. static u8 ddr3_tip_clock_mode(u32 frequency)
  328. {
  329. if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400))
  330. return 1;
  331. return 2;
  332. }
  333. static int mv_ddr_sar_freq_get(int dev_num, enum hws_ddr_freq *freq)
  334. {
  335. u32 reg, ref_clk_satr;
  336. /* Read sample at reset setting */
  337. reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
  338. RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
  339. RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
  340. ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
  341. if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
  342. DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
  343. switch (reg) {
  344. case 0x1:
  345. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  346. ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
  347. reg));
  348. /* fallthrough */
  349. case 0x0:
  350. *freq = DDR_FREQ_333;
  351. break;
  352. case 0x3:
  353. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  354. ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
  355. reg));
  356. /* fallthrough */
  357. case 0x2:
  358. *freq = DDR_FREQ_400;
  359. break;
  360. case 0xd:
  361. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  362. ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
  363. reg));
  364. /* fallthrough */
  365. case 0x4:
  366. *freq = DDR_FREQ_533;
  367. break;
  368. case 0x6:
  369. *freq = DDR_FREQ_600;
  370. break;
  371. case 0x11:
  372. case 0x14:
  373. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  374. ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
  375. reg));
  376. /* fallthrough */
  377. case 0x8:
  378. *freq = DDR_FREQ_667;
  379. break;
  380. case 0x15:
  381. case 0x1b:
  382. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  383. ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
  384. reg));
  385. /* fallthrough */
  386. case 0xc:
  387. *freq = DDR_FREQ_800;
  388. break;
  389. case 0x10:
  390. *freq = DDR_FREQ_933;
  391. break;
  392. case 0x12:
  393. *freq = DDR_FREQ_900;
  394. break;
  395. case 0x13:
  396. *freq = DDR_FREQ_933;
  397. break;
  398. default:
  399. *freq = 0;
  400. return MV_NOT_SUPPORTED;
  401. }
  402. } else { /* REFCLK 40MHz case */
  403. switch (reg) {
  404. case 0x3:
  405. *freq = DDR_FREQ_400;
  406. break;
  407. case 0x5:
  408. *freq = DDR_FREQ_533;
  409. break;
  410. case 0xb:
  411. *freq = DDR_FREQ_800;
  412. break;
  413. case 0x1e:
  414. *freq = DDR_FREQ_900;
  415. break;
  416. default:
  417. *freq = 0;
  418. return MV_NOT_SUPPORTED;
  419. }
  420. }
  421. return MV_OK;
  422. }
  423. static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
  424. {
  425. u32 reg, ref_clk_satr;
  426. /* Read sample at reset setting */
  427. reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
  428. RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
  429. RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
  430. ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
  431. if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
  432. DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
  433. switch (reg) {
  434. case 0x0:
  435. case 0x1:
  436. /* Medium is same as TF to run PBS in this freq */
  437. *freq = DDR_FREQ_333;
  438. break;
  439. case 0x2:
  440. case 0x3:
  441. /* Medium is same as TF to run PBS in this freq */
  442. *freq = DDR_FREQ_400;
  443. break;
  444. case 0x4:
  445. case 0xd:
  446. /* Medium is same as TF to run PBS in this freq */
  447. *freq = DDR_FREQ_533;
  448. break;
  449. case 0x8:
  450. case 0x10:
  451. case 0x11:
  452. case 0x14:
  453. *freq = DDR_FREQ_333;
  454. break;
  455. case 0xc:
  456. case 0x15:
  457. case 0x1b:
  458. *freq = DDR_FREQ_400;
  459. break;
  460. case 0x6:
  461. *freq = DDR_FREQ_300;
  462. break;
  463. case 0x12:
  464. *freq = DDR_FREQ_360;
  465. break;
  466. case 0x13:
  467. *freq = DDR_FREQ_400;
  468. break;
  469. default:
  470. *freq = 0;
  471. return MV_NOT_SUPPORTED;
  472. }
  473. } else { /* REFCLK 40MHz case */
  474. switch (reg) {
  475. case 0x3:
  476. /* Medium is same as TF to run PBS in this freq */
  477. *freq = DDR_FREQ_400;
  478. break;
  479. case 0x5:
  480. /* Medium is same as TF to run PBS in this freq */
  481. *freq = DDR_FREQ_533;
  482. break;
  483. case 0xb:
  484. *freq = DDR_FREQ_400;
  485. break;
  486. case 0x1e:
  487. *freq = DDR_FREQ_360;
  488. break;
  489. default:
  490. *freq = 0;
  491. return MV_NOT_SUPPORTED;
  492. }
  493. }
  494. return MV_OK;
  495. }
  496. static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
  497. {
  498. #if defined(CONFIG_ARMADA_39X)
  499. info_ptr->device_id = 0x6900;
  500. #else
  501. info_ptr->device_id = 0x6800;
  502. #endif
  503. info_ptr->ck_delay = ck_delay;
  504. return MV_OK;
  505. }
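/*
 * PHY register file access (PRFA): PHY registers are reached indirectly
 * through PHY_REG_FILE_ACCESS_REG. prfa_write() packs the request fields
 * (data, register number, pup number, control/data select, broadcast
 * enable, operation type) into one word, raises the request bit and waits
 * for it to self-clear via is_prfa_done(); prfa_read() issues an
 * OPERATION_READ request and extracts the returned data from the same
 * register.
 */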
  506. /* check indirect access to phy register file completed */
  507. static int is_prfa_done(void)
  508. {
  509. u32 reg_val;
  510. u32 iter = 0;
  511. do {
  512. if (iter++ > MAX_POLLING_ITERATIONS) {
  513. printf("error: %s: polling timeout\n", __func__);
  514. return MV_FAIL;
  515. }
  516. dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
  517. reg_val >>= PRFA_REQ_OFFS;
  518. reg_val &= PRFA_REQ_MASK;
  519. } while (reg_val == PRFA_REQ_ENA); /* request pending */
  520. return MV_OK;
  521. }
  522. /* write to phy register thru indirect access */
  523. static int prfa_write(enum hws_access_type phy_access, u32 phy,
  524. enum hws_ddr_phy phy_type, u32 addr,
  525. u32 data, enum hws_operation op_type)
  526. {
  527. u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
  528. ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
  529. ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
  530. ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
  531. ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
  532. (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
  533. ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
  534. dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
  535. reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
  536. dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
  537. /* polling for prfa request completion */
  538. if (is_prfa_done() != MV_OK)
  539. return MV_FAIL;
  540. return MV_OK;
  541. }
  542. /* read from phy register thru indirect access */
  543. static int prfa_read(enum hws_access_type phy_access, u32 phy,
  544. enum hws_ddr_phy phy_type, u32 addr, u32 *data)
  545. {
  546. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  547. u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
  548. u32 i, reg_val;
  549. if (phy_access == ACCESS_TYPE_MULTICAST) {
  550. for (i = 0; i < max_phy; i++) {
  551. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
  552. if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
  553. return MV_FAIL;
  554. dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
  555. data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
  556. }
  557. } else {
  558. if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
  559. return MV_FAIL;
  560. dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
  561. *data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
  562. }
  563. return MV_OK;
  564. }
  565. static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
  566. {
  567. struct hws_tip_config_func_db config_func;
  568. /* new read leveling version */
  569. config_func.mv_ddr_dunit_read = dunit_read;
  570. config_func.mv_ddr_dunit_write = dunit_write;
  571. config_func.tip_dunit_mux_select_func =
  572. ddr3_tip_a38x_select_ddr_controller;
  573. config_func.tip_get_freq_config_info_func =
  574. ddr3_tip_a38x_get_freq_config;
  575. config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
  576. config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
  577. config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
  578. config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
  579. config_func.tip_external_read = ddr3_tip_ext_read;
  580. config_func.tip_external_write = ddr3_tip_ext_write;
  581. config_func.mv_ddr_phy_read = prfa_read;
  582. config_func.mv_ddr_phy_write = prfa_write;
  583. ddr3_tip_init_config_func(dev_num, &config_func);
  584. ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
  585. /* set device attributes*/
  586. ddr3_tip_dev_attr_init(dev_num);
  587. ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
  588. ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
  589. ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
  590. #ifdef CONFIG_ARMADA_39X
  591. ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 1);
  592. #else
  593. ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);
  594. #endif
  595. ca_delay = 0;
  596. delay_enable = 1;
  597. dfs_low_freq = DFS_LOW_FREQ_VALUE;
  598. calibration_update_control = 1;
  599. ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
  600. return MV_OK;
  601. }
  602. static int mv_ddr_training_mask_set(void)
  603. {
  604. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  605. enum hws_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;
  606. mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
  607. LOAD_PATTERN_MASK_BIT |
  608. SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
  609. WRITE_LEVELING_SUPP_MASK_BIT |
  610. READ_LEVELING_MASK_BIT |
  611. PBS_RX_MASK_BIT |
  612. PBS_TX_MASK_BIT |
  613. SET_TARGET_FREQ_MASK_BIT |
  614. WRITE_LEVELING_TF_MASK_BIT |
  615. WRITE_LEVELING_SUPP_TF_MASK_BIT |
  616. READ_LEVELING_TF_MASK_BIT |
  617. CENTRALIZATION_RX_MASK_BIT |
  618. CENTRALIZATION_TX_MASK_BIT);
  619. rl_mid_freq_wa = 1;
  620. if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
  621. mask_tune_func = (WRITE_LEVELING_MASK_BIT |
  622. LOAD_PATTERN_2_MASK_BIT |
  623. WRITE_LEVELING_SUPP_MASK_BIT |
  624. READ_LEVELING_MASK_BIT |
  625. PBS_RX_MASK_BIT |
  626. PBS_TX_MASK_BIT |
  627. CENTRALIZATION_RX_MASK_BIT |
  628. CENTRALIZATION_TX_MASK_BIT);
  629. rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
  630. }
  631. /* Supplementary not supported for ECC modes */
  632. if (1 == ddr3_if_ecc_enabled()) {
  633. mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
  634. mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
  635. mask_tune_func &= ~PBS_TX_MASK_BIT;
  636. mask_tune_func &= ~PBS_RX_MASK_BIT;
  637. }
  638. return MV_OK;
  639. }
  640. /* function: mv_ddr_set_calib_controller
  641. * this function sets the controller which will control
  642. * the calibration cycle at the end of the training.
  643. * 1 - internal controller
  644. * 2 - external controller
  645. */
  646. void mv_ddr_set_calib_controller(void)
  647. {
  648. calibration_update_control = CAL_UPDATE_CTRL_INT;
  649. }
  650. static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
  651. enum hws_ddr_freq frequency)
  652. {
  653. u32 divider = 0;
  654. u32 sar_val, ref_clk_satr;
  655. u32 async_val;
  656. if (if_id != 0) {
  657. DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
  658. ("A38x does not support interface 0x%x\n",
  659. if_id));
  660. return MV_BAD_PARAM;
  661. }
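/*
 * The clock divider is the VCO frequency (from the sample-at-reset tables
 * above) divided by the target DDR frequency. The async path below programs
 * a per-frequency value directly, while the sync path reloads the CPU PLL
 * divider registers with the computed ratio.
 */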
  662. /* get VCO freq index */
  663. sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
  664. RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
  665. RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
  666. ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
  667. if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
  668. DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
  669. divider = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val] / freq_val[frequency];
  670. else
  671. divider = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val] / freq_val[frequency];
  672. if ((async_mode_at_tf == 1) && (freq_val[frequency] > 400)) {
  673. /* Set async mode */
  674. dunit_write(0x20220, 0x1000, 0x1000);
  675. dunit_write(0xe42f4, 0x200, 0x200);
  676. /* Wait for async mode setup */
  677. mdelay(5);
  678. /* Set KNL values */
  679. switch (frequency) {
  680. #ifdef CONFIG_DDR3
  681. case DDR_FREQ_467:
  682. async_val = 0x806f012;
  683. break;
  684. case DDR_FREQ_533:
  685. async_val = 0x807f012;
  686. break;
  687. case DDR_FREQ_600:
  688. async_val = 0x805f00a;
  689. break;
  690. #endif
  691. case DDR_FREQ_667:
  692. async_val = 0x809f012;
  693. break;
  694. case DDR_FREQ_800:
  695. async_val = 0x807f00a;
  696. break;
  697. #ifdef CONFIG_DDR3
  698. case DDR_FREQ_850:
  699. async_val = 0x80cb012;
  700. break;
  701. #endif
  702. case DDR_FREQ_900:
  703. async_val = 0x80d7012;
  704. break;
  705. case DDR_FREQ_933:
  706. async_val = 0x80df012;
  707. break;
  708. case DDR_FREQ_1000:
  709. async_val = 0x80ef012;
  710. break;
  711. case DDR_FREQ_1066:
  712. async_val = 0x80ff012;
  713. break;
  714. default:
  715. /* set DDR_FREQ_667 as default */
  716. async_val = 0x809f012;
  717. }
  718. dunit_write(0xe42f0, 0xffffffff, async_val);
  719. } else {
  720. /* Set sync mode */
  721. dunit_write(0x20220, 0x1000, 0x0);
  722. dunit_write(0xe42f4, 0x200, 0x0);
  723. /* cpupll_clkdiv_reset_mask */
  724. dunit_write(0xe4264, 0xff, 0x1f);
  725. /* cpupll_clkdiv_reload_smooth */
  726. dunit_write(0xe4260, (0xff << 8), (0x2 << 8));
  727. /* cpupll_clkdiv_relax_en */
  728. dunit_write(0xe4260, (0xff << 24), (0x2 << 24));
  729. /* write the divider */
  730. dunit_write(0xe4268, (0x3f << 8), (divider << 8));
  731. /* set cpupll_clkdiv_reload_ratio */
  732. dunit_write(0xe4264, (1 << 8), (1 << 8));
  733. /* unset cpupll_clkdiv_reload_ratio */
  734. dunit_write(0xe4264, (1 << 8), 0x0);
  735. /* clear cpupll_clkdiv_reload_force */
  736. dunit_write(0xe4260, (0xff << 8), 0x0);
  737. /* clear cpupll_clkdiv_relax_en */
  738. dunit_write(0xe4260, (0xff << 24), 0x0);
  739. /* clear cpupll_clkdiv_reset_mask */
  740. dunit_write(0xe4264, 0xff, 0x0);
  741. }
  742. /* Dunit training clock + 1:1/2:1 mode */
  743. dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
  744. dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));
  745. return MV_OK;
  746. }
  747. /*
  748. * external read from memory
  749. */
  750. int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
  751. u32 num_of_bursts, u32 *data)
  752. {
  753. u32 burst_num;
  754. for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
  755. data[burst_num] = readl(reg_addr + 4 * burst_num);
  756. return MV_OK;
  757. }
  758. /*
  759. * external write to memory
  760. */
  761. int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
  762. u32 num_of_bursts, u32 *data) {
  763. u32 burst_num;
  764. for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
  765. writel(data[burst_num], reg_addr + 4 * burst_num);
  766. return MV_OK;
  767. }
  768. int mv_ddr_early_init(void)
  769. {
  770. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  771. /* FIXME: change this configuration per ddr type
  772. * configure a380 and a390 to work with receiver odt timing
  773. * the odt_config is defined:
  774. * '1' in ddr4
  775. * '0' in ddr3
  776. * here the parameter is overridden to '1' for both ddr3 and ddr4 (in ddr4 the default is already '1')
  777. * to configure the odt to work with the timing restrictions
  778. */
  779. mv_ddr_sw_db_init(0, 0);
  780. if (tm->interface_params[0].memory_freq != DDR_FREQ_SAR)
  781. async_mode_at_tf = 1;
  782. return MV_OK;
  783. }
  784. int mv_ddr_early_init2(void)
  785. {
  786. mv_ddr_training_mask_set();
  787. return MV_OK;
  788. }
  789. int mv_ddr_pre_training_fixup(void)
  790. {
  791. return 0;
  792. }
  793. int mv_ddr_post_training_fixup(void)
  794. {
  795. return 0;
  796. }
  797. int ddr3_post_run_alg(void)
  798. {
  799. return MV_OK;
  800. }
  801. int ddr3_silicon_post_init(void)
  802. {
  803. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  804. /* Set half bus width */
  805. if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
  806. CHECK_STATUS(ddr3_tip_if_write
  807. (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
  808. SDRAM_CFG_REG, 0x0, 0x8000));
  809. }
  810. return MV_OK;
  811. }
  812. u32 mv_ddr_init_freq_get(void)
  813. {
  814. enum hws_ddr_freq freq;
  815. mv_ddr_sar_freq_get(0, &freq);
  816. return freq;
  817. }
  818. static u32 ddr3_get_bus_width(void)
  819. {
  820. u32 bus_width;
  821. bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
  822. BUS_IN_USE_OFFS;
  823. return (bus_width == 0) ? 16 : 32;
  824. }
  825. static u32 ddr3_get_device_width(u32 cs)
  826. {
  827. u32 device_width;
  828. device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
  829. (CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
  830. CS_STRUCT_OFFS(cs);
  831. return (device_width == 0) ? 8 : 16;
  832. }
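/*
 * Decode the per-CS device size field of SDRAM_ADDR_CTRL_REG (a 2-bit low
 * part plus a high bit) into the device density, apparently in Mbit, which
 * matches the MiB calculation in ddr3_calc_mem_cs_size().
 */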
  833. static u32 ddr3_get_device_size(u32 cs)
  834. {
  835. u32 device_size_low, device_size_high, device_size;
  836. u32 data, cs_low_offset, cs_high_offset;
  837. cs_low_offset = CS_SIZE_OFFS(cs);
  838. cs_high_offset = CS_SIZE_HIGH_OFFS(cs);
  839. data = reg_read(SDRAM_ADDR_CTRL_REG);
  840. device_size_low = (data >> cs_low_offset) & 0x3;
  841. device_size_high = (data >> cs_high_offset) & 0x1;
  842. device_size = device_size_low | (device_size_high << 2);
  843. switch (device_size) {
  844. case 0:
  845. return 2048;
  846. case 2:
  847. return 512;
  848. case 3:
  849. return 1024;
  850. case 4:
  851. return 4096;
  852. case 5:
  853. return 8192;
  854. case 1:
  855. default:
  856. DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
  857. /* zeroes mem size in ddr3_calc_mem_cs_size */
  858. return 0;
  859. }
  860. }
  861. int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
  862. {
  863. u32 cs_mem_size;
  864. /* Calculate in MiB */
  865. cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
  866. ddr3_get_device_size(cs)) / 8;
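/*
 * For example, a 32-bit bus populated with 16-bit, 4096 Mbit devices gives
 * (32 / 16) * 4096 / 8 = 1024 MiB per chip select.
 */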
  867. /*
  868. * Multiply by the controller bus width, 2x for 64 bit
  869. * (the SoC controller may be 32 or 64 bit, so bit 15 in
  870. * register 0x1400, which indicates whether the whole bus or
  871. * only half of it is used, has a different meaning)
  872. */
  873. cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
  874. if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
  875. DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
  876. return MV_BAD_VALUE;
  877. }
  878. *cs_size = cs_mem_size << 20; /* write cs size in bytes */
  879. return MV_OK;
  880. }
  881. static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
  882. {
  883. u32 reg, cs;
  884. uint64_t mem_total_size = 0;
  885. uint64_t cs_mem_size = 0;
  886. uint64_t mem_total_size_c, cs_mem_size_c;
  887. #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
  888. u32 physical_mem_size;
  889. u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
  890. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  891. #endif
  892. /* Open fast path windows */
  893. for (cs = 0; cs < MAX_CS_NUM; cs++) {
  894. if (cs_ena & (1 << cs)) {
  895. /* get CS size */
  896. if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
  897. return MV_FAIL;
  898. #ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
  899. /*
  900. * if the number of address pins doesn't allow using the max
  901. * mem size defined in the topology,
  902. * the mem size is limited by DEVICE_MAX_DRAM_ADDRESS_SIZE
  903. */
  904. physical_mem_size = mem_size
  905. [tm->interface_params[0].memory_size];
  906. if (ddr3_get_device_width(cs) == 16) {
  907. /*
  908. * a 16-bit mem device can be twice as large - the least
  909. * significant pin is not needed
  910. */
  911. max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
  912. }
  913. if (physical_mem_size > max_mem_size) {
  914. cs_mem_size = max_mem_size *
  915. (ddr3_get_bus_width() /
  916. ddr3_get_device_width(cs));
  917. printf("Updated Physical Mem size is from 0x%x to %x\n",
  918. physical_mem_size,
  919. DEVICE_MAX_DRAM_ADDRESS_SIZE);
  920. }
  921. #endif
  922. /* set fast path window control for the cs */
  923. reg = 0xffffe1;
  924. reg |= (cs << 2);
  925. reg |= (cs_mem_size - 1) & 0xffff0000;
  926. /* Open fast path window */
  927. reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);
  928. /* Set fast path window base address for the cs */
  929. reg = ((cs_mem_size) * cs) & 0xffff0000;
  930. /* Set base address */
  931. reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);
  932. /*
  933. * Since the memory size may be bigger than 4G, the sum may
  934. * not fit in a 32-bit word,
  935. * so to estimate the result divide mem_total_size and
  936. * cs_mem_size by 0x10000 (equivalent to >> 16)
  937. */
  938. mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
  939. cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;
  940. /* if the sum is less than 2 G - calculate the value */
  941. if (mem_total_size_c + cs_mem_size_c < 0x10000)
  942. mem_total_size += cs_mem_size;
  943. else /* put max possible size */
  944. mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
  945. }
  946. }
  947. /* Set L2 filtering to Max Memory size */
  948. reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);
  949. return MV_OK;
  950. }
  951. static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
  952. {
  953. u32 win_ctrl_reg, num_of_win_regs;
  954. u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
  955. u32 ui;
  956. win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
  957. num_of_win_regs = 16;
  958. /* Return XBAR windows 4-7 or 16-19 init configuration */
  959. for (ui = 0; ui < num_of_win_regs; ui++)
  960. reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);
  961. printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
  962. ddr_type);
  963. #if defined DYNAMIC_CS_SIZE_CONFIG
  964. if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
  965. printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
  966. #else
  967. u32 reg, cs;
  968. reg = 0x1fffffe1;
  969. for (cs = 0; cs < MAX_CS_NUM; cs++) {
  970. if (cs_ena & (1 << cs)) {
  971. reg |= (cs << 2);
  972. break;
  973. }
  974. }
  975. /* Open fast path window to 0.5G */
  976. reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
  977. #endif
  978. return MV_OK;
  979. }
  980. static int ddr3_save_and_set_training_windows(u32 *win)
  981. {
  982. u32 cs_ena;
  983. u32 reg, tmp_count, cs, ui;
  984. u32 win_ctrl_reg, win_base_reg, win_remap_reg;
  985. u32 num_of_win_regs, win_jump_index;
  986. win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
  987. win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
  988. win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
  989. win_jump_index = 0x10;
  990. num_of_win_regs = 16;
  991. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  992. #ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
  993. /*
  994. * Disable L2 filtering during DDR training
  995. * (when Cross Bar window is open)
  996. */
  997. reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
  998. #endif
  999. cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;
  1000. /* Close XBAR Window 19 - Not needed */
  1001. /* {0x000200e8} - Open Mbus Window - 2G */
  1002. reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);
  1003. /* Save XBAR Windows 4-19 init configurations */
  1004. for (ui = 0; ui < num_of_win_regs; ui++)
  1005. win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);
  1006. /* Open XBAR Windows 4-7 or 16-19 for other CS */
  1007. reg = 0;
  1008. tmp_count = 0;
  1009. for (cs = 0; cs < MAX_CS_NUM; cs++) {
  1010. if (cs_ena & (1 << cs)) {
  1011. switch (cs) {
  1012. case 0:
  1013. reg = 0x0e00;
  1014. break;
  1015. case 1:
  1016. reg = 0x0d00;
  1017. break;
  1018. case 2:
  1019. reg = 0x0b00;
  1020. break;
  1021. case 3:
  1022. reg = 0x0700;
  1023. break;
  1024. }
  1025. reg |= (1 << 0);
  1026. reg |= (SDRAM_CS_SIZE & 0xffff0000);
  1027. reg_write(win_ctrl_reg + win_jump_index * tmp_count,
  1028. reg);
  1029. reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
  1030. 0xffff0000);
  1031. reg_write(win_base_reg + win_jump_index * tmp_count,
  1032. reg);
  1033. if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
  1034. reg_write(win_remap_reg +
  1035. win_jump_index * tmp_count, 0);
  1036. tmp_count++;
  1037. }
  1038. }
  1039. return MV_OK;
  1040. }
  1041. static u32 win[16];
  1042. int mv_ddr_pre_training_soc_config(const char *ddr_type)
  1043. {
  1044. u32 soc_num;
  1045. u32 reg_val;
  1046. /* Switching CPU to MRVL ID */
  1047. soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
  1048. SAR1_CPU_CORE_OFFSET;
  1049. switch (soc_num) {
  1050. case 0x3:
  1051. reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
  1052. reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
  1053. /* fallthrough */
  1054. case 0x1:
  1055. reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
  1056. /* fallthrough */
  1057. case 0x0:
  1058. reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
  1059. /* fallthrough */
  1060. default:
  1061. break;
  1062. }
  1063. /*
  1064. * Set the DRAM Reset Mask if a GPIO indication of wakeup from
  1065. * suspend was detected, i.e. the DRAM contents will not be
  1066. * overwritten / reset when waking from suspend
  1067. */
  1068. if (mv_ddr_sys_env_suspend_wakeup_check() ==
  1069. SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
  1070. reg_bit_set(SDRAM_INIT_CTRL_REG,
  1071. DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
  1072. }
  1073. /* Check if DRAM is already initialized */
  1074. if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
  1075. (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
  1076. printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
  1077. return MV_OK;
  1078. }
  1079. /* Fix read ready phases for all SOC in reg 0x15c8 */
  1080. reg_val = reg_read(TRAINING_DBG_3_REG);
  1081. reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
  1082. reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0)); /* phase 0 */
  1083. reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
  1084. reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1)); /* phase 1 */
  1085. reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
  1086. reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3)); /* phase 3 */
  1087. reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
  1088. reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4)); /* phase 4 */
  1089. reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
  1090. reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5)); /* phase 5 */
  1091. reg_write(TRAINING_DBG_3_REG, reg_val);
  1092. /*
  1093. * Axi_bresp_mode[8] = Compliant,
  1094. * Axi_addr_decode_cntrl[11] = Internal,
  1095. * Axi_data_bus_width[0] = 128bit
  1096. */
  1097. /* 0x14a8 - AXI Control Register */
  1098. reg_write(AXI_CTRL_REG, 0);
  1099. /*
  1100. * Stage 2 - Training Values Setup
  1101. */
  1102. /* Set X-BAR windows for the training sequence */
  1103. ddr3_save_and_set_training_windows(win);
  1104. return MV_OK;
  1105. }
  1106. static int ddr3_new_tip_dlb_config(void)
  1107. {
  1108. u32 reg, i = 0;
  1109. struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
  1110. /* Write the configuration */
  1111. while (config_table_ptr[i].reg_addr != 0) {
  1112. reg_write(config_table_ptr[i].reg_addr,
  1113. config_table_ptr[i].reg_data);
  1114. i++;
  1115. }
  1116. /* Enable DLB */
  1117. reg = reg_read(DLB_CTRL_REG);
  1118. reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
  1119. ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
  1120. ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
  1121. ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
  1122. ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
  1123. reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
  1124. (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
  1125. (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
  1126. (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
  1127. (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);
  1128. reg_write(DLB_CTRL_REG, reg);
  1129. return MV_OK;
  1130. }
  1131. int mv_ddr_post_training_soc_config(const char *ddr_type)
  1132. {
  1133. u32 reg_val;
  1134. /* Restore and set windows */
  1135. ddr3_restore_and_set_final_windows(win, ddr_type);
  1136. /* Update DRAM init indication in bootROM register */
  1137. reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
  1138. reg_write(REG_BOOTROM_ROUTINE_ADDR,
  1139. reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
  1140. /* DLB config */
  1141. ddr3_new_tip_dlb_config();
  1142. return MV_OK;
  1143. }
  1144. void mv_ddr_mc_config(void)
  1145. {
  1146. /* Memory controller initializations */
  1147. struct init_cntr_param init_param;
  1148. int status;
  1149. init_param.do_mrs_phy = 1;
  1150. init_param.is_ctrl64_bit = 0;
  1151. init_param.init_phy = 1;
  1152. init_param.msys_init = 1;
  1153. status = hws_ddr3_tip_init_controller(0, &init_param);
  1154. if (status != MV_OK)
  1155. printf("DDR3 init controller - FAILED 0x%x\n", status);
  1156. status = mv_ddr_mc_init();
  1157. if (status != MV_OK)
  1158. printf("DDR3 init_sequence - FAILED 0x%x\n", status);
  1159. }
  1160. /* function: mv_ddr_mc_init
  1161. * this function enables the dunit after init controller configuration
  1162. */
  1163. int mv_ddr_mc_init(void)
  1164. {
  1165. CHECK_STATUS(ddr3_tip_enable_init_sequence(0));
  1166. return MV_OK;
  1167. }
  1168. /* function: ddr3_tip_configure_phy
  1169. * configures phy and electrical parameters
  1170. */
  1171. int ddr3_tip_configure_phy(u32 dev_num)
  1172. {
  1173. u32 if_id, phy_id;
  1174. u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
  1175. struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
  1176. CHECK_STATUS(ddr3_tip_bus_write
  1177. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1178. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  1179. PAD_ZRI_CAL_PHY_REG,
  1180. ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
  1181. CHECK_STATUS(ddr3_tip_bus_write
  1182. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1183. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  1184. PAD_ZRI_CAL_PHY_REG,
  1185. ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
  1186. CHECK_STATUS(ddr3_tip_bus_write
  1187. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1188. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  1189. PAD_ODT_CAL_PHY_REG,
  1190. ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
  1191. CHECK_STATUS(ddr3_tip_bus_write
  1192. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1193. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  1194. PAD_ODT_CAL_PHY_REG,
  1195. ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
  1196. CHECK_STATUS(ddr3_tip_bus_write
  1197. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1198. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  1199. PAD_PRE_DISABLE_PHY_REG, 0));
  1200. CHECK_STATUS(ddr3_tip_bus_write
  1201. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1202. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  1203. CMOS_CONFIG_PHY_REG, 0));
  1204. CHECK_STATUS(ddr3_tip_bus_write
  1205. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1206. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  1207. CMOS_CONFIG_PHY_REG, 0));
  1208. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  1209. /* check if the interface is enabled */
  1210. VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
  1211. for (phy_id = 0;
  1212. phy_id < octets_per_if_num;
  1213. phy_id++) {
  1214. VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
  1215. /* Vref & clamp */
  1216. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  1217. (dev_num, ACCESS_TYPE_UNICAST,
  1218. if_id, phy_id, DDR_PHY_DATA,
  1219. PAD_CFG_PHY_REG,
  1220. ((clamp_tbl[if_id] << 4) | vref_init_val),
  1221. ((0x7 << 4) | 0x7)));
  1222. /* clamp not relevant for control */
  1223. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  1224. (dev_num, ACCESS_TYPE_UNICAST,
  1225. if_id, phy_id, DDR_PHY_CONTROL,
  1226. PAD_CFG_PHY_REG, 0x4, 0x7));
  1227. }
  1228. }
  1229. if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
  1230. MV_DDR_PHY_EDGE_POSITIVE)
  1231. CHECK_STATUS(ddr3_tip_bus_write
  1232. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1233. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  1234. DDR_PHY_DATA, 0x90, 0x6002));
  1235. return MV_OK;
  1236. }
  1237. int mv_ddr_manual_cal_do(void)
  1238. {
  1239. return 0;
  1240. }