ddr3_a38x.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

#define A38X_NUMBER_OF_INTERFACES		5

#define SAR_DEV_ID_OFFS				27
#define SAR_DEV_ID_MASK				0x7

/* Thermal Sensor Registers */
#define TSEN_STATE_REG				0xe4070
#define TSEN_STATE_OFFSET			31
#define TSEN_STATE_MASK				(0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG				0xe4074
#define TSEN_CONF_RST_OFFSET			8
#define TSEN_CONF_RST_MASK			(0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG				0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET	10
#define TSEN_STATUS_READOUT_VALID_MASK		(0x1 << \
						 TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET		0
#define TSEN_STATUS_TEMP_OUT_MASK		(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)

static struct dfx_access interface_map[] = {
        /* Pipe   Client */
        { 0, 17 },
        { 1, 7 },
        { 1, 11 },
        { 0, 3 },
        { 1, 25 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 }
};

/* This array holds the board round trip delay (DQ and CK) per <interface, bus> */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
        /* 1st board */
        /* Interface bus DQS-delay CK-delay */
        { 3952, 5060 },
        { 3192, 4493 },
        { 4785, 6677 },
        { 3413, 7267 },
        { 4282, 6086 },	/* ECC PUP */
        { 3952, 5134 },
        { 3192, 4567 },
        { 4785, 6751 },
        { 3413, 7341 },
        { 4282, 6160 },	/* ECC PUP */
        /* 2nd board */
        /* Interface bus DQS-delay CK-delay */
        { 3952, 5060 },
        { 3192, 4493 },
        { 4785, 6677 },
        { 3413, 7267 },
        { 4282, 6086 },	/* ECC PUP */
        { 3952, 5134 },
        { 3192, 4567 },
        { 4785, 6751 },
        { 3413, 7341 },
        { 4282, 6160 }	/* ECC PUP */
};

static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
        0x3,			/* DDR_FREQ_100 */
        0x4,			/* DDR_FREQ_400 */
        0x4,			/* DDR_FREQ_533 */
        0x5,			/* DDR_FREQ_667 */
        0x5,			/* DDR_FREQ_800 */
        0x5,			/* DDR_FREQ_933 */
        0x5,			/* DDR_FREQ_1066 */
        0x3,			/* DDR_FREQ_311 */
        0x3,			/* DDR_FREQ_333 */
        0x4,			/* DDR_FREQ_467 */
        0x5,			/* DDR_FREQ_850 */
        0x5,			/* DDR_FREQ_600 */
        0x3,			/* DDR_FREQ_300 */
        0x5,			/* DDR_FREQ_900 */
        0x3,			/* DDR_FREQ_360 */
        0x5			/* DDR_FREQ_1000 */
};

static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
        /*TBD*/ 0x1,		/* DDR_FREQ_100 */
        0x2,			/* DDR_FREQ_400 */
        0x2,			/* DDR_FREQ_533 */
        0x2,			/* DDR_FREQ_667 */
        0x2,			/* DDR_FREQ_800 */
        0x3,			/* DDR_FREQ_933 */
        0x3,			/* DDR_FREQ_1066 */
        0x1,			/* DDR_FREQ_311 */
        0x1,			/* DDR_FREQ_333 */
        0x2,			/* DDR_FREQ_467 */
        0x2,			/* DDR_FREQ_850 */
        0x2,			/* DDR_FREQ_600 */
        0x1,			/* DDR_FREQ_300 */
        0x2,			/* DDR_FREQ_900 */
        0x1,			/* DDR_FREQ_360 */
        0x2			/* DDR_FREQ_1000 */
};

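/* VCO frequency in MHz, indexed by the SAR DDR clock-select value */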
static u16 a38x_vco_freq_per_sar[] = {
        666,			/* 0 */
        1332,
        800,
        1600,
        1066,
        2132,
        1200,
        2400,
        1332,
        1332,
        1500,
        1500,
        1600,			/* 12 */
        1600,
        1700,
        1700,
        1866,
        1866,
        1800,			/* 18 */
        2000,
        2000,
        4000,
        2132,
        2132,
        2300,
        2300,
        2400,
        2400,
        2500,
        2500,
        800
};

u32 pipe_multicast_mask;

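/* DQ bit to PHY pin mapping, eight entries per subphy (bus) */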
u32 dq_bit_map_2_phy_pin[] = {
        1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
        8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
        3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
        1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
        0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
                                     enum hws_ddr_freq freq);

/*
 * Read temperature TJ value
 */
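/*
 * Note: the 10-bit TSEN readout is converted to degrees Celsius below
 * using (approximately) T = (10000 * readout) / 21445 - 272.674.
 */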
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
        int reg = 0;

        /* Initiates TSEN hardware reset once */
        if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
                reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
        mdelay(10);

        /* Check if the readout field is valid */
        if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
                printf("%s: TSEN not ready\n", __func__);
                return 0;
        }

        reg = reg_read(TSEN_STATUS_REG);
        reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

        return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}

/*
 * Name:    ddr3_tip_a38x_get_freq_config.
 * Desc:
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
                                  struct hws_tip_freq_config_info
                                  *freq_config_info)
{
        if (a38x_bw_per_freq[freq] == 0xff)
                return MV_NOT_SUPPORTED;

        if (freq_config_info == NULL)
                return MV_BAD_PARAM;

        freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
        freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
        freq_config_info->is_supported = 1;

        return MV_OK;
}

/*
 * Name:    ddr3_tip_a38x_pipe_enable.
 * Desc:
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
                              u32 if_id, int enable)
{
        u32 data_value, pipe_enable_mask = 0;

        if (enable == 0) {
                pipe_enable_mask = 0;
        } else {
                if (interface_access == ACCESS_TYPE_MULTICAST)
                        pipe_enable_mask = pipe_multicast_mask;
                else
                        pipe_enable_mask = (1 << interface_map[if_id].pipe);
        }

        CHECK_STATUS(ddr3_tip_reg_read
                     (dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
        data_value = (data_value & (~0xff)) | pipe_enable_mask;
        CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));

        return MV_OK;
}

/*
 * Name:    ddr3_tip_a38x_if_write.
 * Desc:
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
                           u32 if_id, u32 reg_addr, u32 data_value,
                           u32 mask)
{
        u32 ui_data_read;

        if (mask != MASK_ALL_BITS) {
                CHECK_STATUS(ddr3_tip_a38x_if_read
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
                              &ui_data_read, MASK_ALL_BITS));
                data_value = (ui_data_read & (~mask)) | (data_value & mask);
        }

        reg_write(reg_addr, data_value);

        return MV_OK;
}

/*
 * Name:    ddr3_tip_a38x_if_read.
 * Desc:
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
                          u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
        *data = reg_read(reg_addr) & mask;

        return MV_OK;
}

/*
 * Name:    ddr3_tip_a38x_select_ddr_controller.
 * Desc:    Enable/Disable access to Marvell's server.
 * Args:    dev_num - device number
 *          enable  - whether to enable or disable the server
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
        u32 reg;

        reg = reg_read(CS_ENABLE_REG);

        if (enable)
                reg |= (1 << 6);
        else
                reg &= ~(1 << 6);

        reg_write(CS_ENABLE_REG, reg);

        return MV_OK;
}

/*
 * Name:    ddr3_tip_init_a38x_silicon.
 * Desc:    init Training SW DB.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
        struct hws_tip_config_func_db config_func;
        enum hws_ddr_freq ddr_freq;
        int status;
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* new read leveling version */
        config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
        config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
        config_func.tip_dunit_mux_select_func =
                ddr3_tip_a38x_select_ddr_controller;
        config_func.tip_get_freq_config_info_func =
                ddr3_tip_a38x_get_freq_config;
        config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
        config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
        config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;

        ddr3_tip_init_config_func(dev_num, &config_func);

        ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

        status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
        if (MV_OK != status) {
                DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
                        ("DDR3 silicon get target frequency - FAILED 0x%x\n",
                         status));
                return status;
        }

        rl_version = 1;
        mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
                          LOAD_PATTERN_MASK_BIT |
                          SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
                          /* LOAD_PATTERN_2_MASK_BIT | */
                          WRITE_LEVELING_SUPP_MASK_BIT |
                          READ_LEVELING_MASK_BIT |
                          PBS_RX_MASK_BIT |
                          PBS_TX_MASK_BIT |
                          SET_TARGET_FREQ_MASK_BIT |
                          WRITE_LEVELING_TF_MASK_BIT |
                          WRITE_LEVELING_SUPP_TF_MASK_BIT |
                          READ_LEVELING_TF_MASK_BIT |
                          CENTRALIZATION_RX_MASK_BIT |
                          CENTRALIZATION_TX_MASK_BIT);
        rl_mid_freq_wa = 1;

        if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
                mask_tune_func = (WRITE_LEVELING_MASK_BIT |
                                  LOAD_PATTERN_2_MASK_BIT |
                                  WRITE_LEVELING_SUPP_MASK_BIT |
                                  READ_LEVELING_MASK_BIT |
                                  PBS_RX_MASK_BIT |
                                  PBS_TX_MASK_BIT |
                                  CENTRALIZATION_RX_MASK_BIT |
                                  CENTRALIZATION_TX_MASK_BIT);
                rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
        }

        /* Supplementary not supported for ECC modes */
        if (1 == ddr3_if_ecc_enabled()) {
                mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
                mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
                mask_tune_func &= ~PBS_TX_MASK_BIT;
                mask_tune_func &= ~PBS_RX_MASK_BIT;
        }

        if (ck_delay == -1)
                ck_delay = 160;
        if (ck_delay_16 == -1)
                ck_delay_16 = 160;
        ca_delay = 0;
        delay_enable = 1;
        calibration_update_control = 1;

        init_freq = tm->interface_params[first_active_if].memory_freq;

        ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

        return MV_OK;
}

int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
        u32 if_id = 0;
        enum hws_ddr_freq freq;

        ddr3_tip_a38x_get_init_freq(dev_num, &freq);
        tm->interface_params[if_id].memory_freq = freq;

        /*
         * re-calc topology parameters according to topology updates
         * (if needed)
         */
        CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));

        return MV_OK;
}

int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
{
        struct hws_topology_map *tm = ddr3_get_topology_map();

        if (NULL == tm)
                return MV_FAIL;

        ddr3_a38x_update_topology_map(dev_num, tm);
        ddr3_tip_init_a38x_silicon(dev_num, board_id);

        return MV_OK;
}

int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
{
        u32 reg;

        /* Read sample at reset setting */
        reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
               RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        switch (reg) {
        case 0x0:
        case 0x1:
                *freq = DDR_FREQ_333;
                break;
        case 0x2:
        case 0x3:
                *freq = DDR_FREQ_400;
                break;
        case 0x4:
        case 0xd:
                *freq = DDR_FREQ_533;
                break;
        case 0x6:
                *freq = DDR_FREQ_600;
                break;
        case 0x8:
        case 0x11:
        case 0x14:
                *freq = DDR_FREQ_667;
                break;
        case 0xc:
        case 0x15:
        case 0x1b:
                *freq = DDR_FREQ_800;
                break;
        case 0x10:
                *freq = DDR_FREQ_933;
                break;
        case 0x12:
                *freq = DDR_FREQ_900;
                break;
        case 0x13:
                *freq = DDR_FREQ_900;
                break;
        default:
                *freq = 0;
                return MV_NOT_SUPPORTED;
        }

        return MV_OK;
}

int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
        u32 reg;

        /* Read sample at reset setting */
        reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
               RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        switch (reg) {
        case 0x0:
        case 0x1:
                /* Medium is same as TF to run PBS in this freq */
                *freq = DDR_FREQ_333;
                break;
        case 0x2:
        case 0x3:
                /* Medium is same as TF to run PBS in this freq */
                *freq = DDR_FREQ_400;
                break;
        case 0x4:
        case 0xd:
                *freq = DDR_FREQ_533;
                break;
        case 0x8:
        case 0x11:
        case 0x14:
                *freq = DDR_FREQ_333;
                break;
        case 0xc:
        case 0x15:
        case 0x1b:
                *freq = DDR_FREQ_400;
                break;
        case 0x6:
                *freq = DDR_FREQ_300;
                break;
        case 0x12:
                *freq = DDR_FREQ_360;
                break;
        case 0x13:
                *freq = DDR_FREQ_400;
                break;
        default:
                *freq = 0;
                return MV_NOT_SUPPORTED;
        }

        return MV_OK;
}

u32 ddr3_tip_get_init_freq(void)
{
        enum hws_ddr_freq freq;

        ddr3_tip_a38x_get_init_freq(0, &freq);

        return freq;
}

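/*
 * Divide the SAR-selected VCO clock down to the requested DDR frequency
 * and reload the new ratio through the cpupll_clkdiv_* controls below.
 */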
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
                                     enum hws_ddr_freq frequency)
{
        u32 divider = 0;
        u32 sar_val;

        if (if_id != 0) {
                DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
                        ("A38x does not support interface 0x%x\n",
                         if_id));
                return MV_BAD_PARAM;
        }

        /* get VCO freq index */
        sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
                   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];
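        /* e.g. a 1600 MHz VCO driving an 800 MHz DDR clock gives divider == 2 */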

        /* Set Sync mode */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
                      0x1000));
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
                      0x200));

        /* cpupll_clkdiv_reset_mask */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
                      0xff));

        /* cpupll_clkdiv_reload_smooth */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
                      (0x2 << 8), (0xff << 8)));

        /* cpupll_clkdiv_relax_en */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
                      (0x2 << 24), (0xff << 24)));

        /* write the divider */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
                      (divider << 8), (0x3f << 8)));

        /* set cpupll_clkdiv_reload_ratio */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
                      (1 << 8), (1 << 8)));

        /* unset cpupll_clkdiv_reload_ratio */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
                      (1 << 8)));

        /* clear cpupll_clkdiv_reload_force */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
                      (0xff << 8)));

        /* clear cpupll_clkdiv_relax_en */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
                      (0xff << 24)));

        /* clear cpupll_clkdiv_reset_mask */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
                      0xff));

        /* Dunit training clock + 1:1 mode */
        if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
                              (1 << 16), (1 << 16)));
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
                              (0 << 15), (1 << 15)));
        } else {
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
                              0, (1 << 16)));
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
                              (1 << 15), (1 << 15)));
        }

        return MV_OK;
}

/*
 * external read from memory
 */
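/* each burst is eight consecutive 32-bit words (32 bytes) */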
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
                      u32 num_of_bursts, u32 *data)
{
        u32 burst_num;

        for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
                data[burst_num] = readl(reg_addr + 4 * burst_num);

        return MV_OK;
}

/*
 * external write to memory
 */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
                       u32 num_of_bursts, u32 *data)
{
        u32 burst_num;

        for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
                writel(data[burst_num], reg_addr + 4 * burst_num);

        return MV_OK;
}

int ddr3_silicon_pre_init(void)
{
        return ddr3_silicon_init();
}

int ddr3_post_run_alg(void)
{
        return MV_OK;
}

int ddr3_silicon_post_init(void)
{
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* Set half bus width */
        if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
                CHECK_STATUS(ddr3_tip_if_write
                             (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
                              REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
        }

        return MV_OK;
}

int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
        info_ptr->device_id = 0x6800;
        info_ptr->ck_delay = ck_delay;

        return MV_OK;
}