ddr3_a38x.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) Marvell International Ltd. and its affiliates
  4. */
  5. #include <common.h>
  6. #include <i2c.h>
  7. #include <spl.h>
  8. #include <asm/io.h>
  9. #include <asm/arch/cpu.h>
  10. #include <asm/arch/soc.h>
  11. #include "ddr3_init.h"
  12. #define A38X_NUMBER_OF_INTERFACES 5
  13. #define SAR_DEV_ID_OFFS 27
  14. #define SAR_DEV_ID_MASK 0x7
/* Thermal Sensor Registers */
  16. #define TSEN_STATE_REG 0xe4070
  17. #define TSEN_STATE_OFFSET 31
  18. #define TSEN_STATE_MASK (0x1 << TSEN_STATE_OFFSET)
  19. #define TSEN_CONF_REG 0xe4074
  20. #define TSEN_CONF_RST_OFFSET 8
  21. #define TSEN_CONF_RST_MASK (0x1 << TSEN_CONF_RST_OFFSET)
  22. #define TSEN_STATUS_REG 0xe4078
  23. #define TSEN_STATUS_READOUT_VALID_OFFSET 10
  24. #define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
  25. TSEN_STATUS_READOUT_VALID_OFFSET)
  26. #define TSEN_STATUS_TEMP_OUT_OFFSET 0
  27. #define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
/*
 * DFX pipe/client address for each DDR interface, indexed by if_id.
 * Entries 5..11 are zero-filled placeholders (unused interfaces).
 * NOTE(review): only the first A38X_NUMBER_OF_INTERFACES (5) entries
 * carry real pipe/client values - confirm against the DFX spec.
 */
static struct dfx_access interface_map[] = {
	/* Pipe	Client */
	{ 0, 17 },
	{ 1, 7 },
	{ 1, 11 },
	{ 0, 3 },
	{ 1, 25 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 }
};
  43. /* This array hold the board round trip delay (DQ and CK) per <interface,bus> */
/* This array hold the board round trip delay (DQ and CK) per <interface,bus> */
/*
 * Two boards x two interfaces x five byte lanes (4 data + 1 ECC PUP).
 * Values are in pico-seconds; consumed by the static-algorithm DB via
 * the board_offset computed in ddr3_tip_init_a38x_silicon().
 */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
	/* 1st board */
	/* Interface bus DQS-delay CK-delay */
	{ 3952, 5060 },
	{ 3192, 4493 },
	{ 4785, 6677 },
	{ 3413, 7267 },
	{ 4282, 6086 },	/* ECC PUP */
	{ 3952, 5134 },
	{ 3192, 4567 },
	{ 4785, 6751 },
	{ 3413, 7341 },
	{ 4282, 6160 },	/* ECC PUP */

	/* 2nd board */
	/* Interface bus DQS-delay CK-delay */
	{ 3952, 5060 },
	{ 3192, 4493 },
	{ 4785, 6677 },
	{ 3413, 7267 },
	{ 4282, 6086 },	/* ECC PUP */
	{ 3952, 5134 },
	{ 3192, 4567 },
	{ 4785, 6751 },
	{ 3413, 7341 },
	{ 4282, 6160 }	/* ECC PUP */
};
#ifdef STATIC_ALGO_SUPPORT
/* package trace */
/*
 * Package-level trace delays per <interface, bus>; all zero on A38x,
 * i.e. package routing is treated as delay-neutral by the static
 * training algorithm.
 */
static struct trip_delay_element a38x_package_round_trip_delay_array[] = {
	/* IF BUS DQ_DELAY CK_DELAY */
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 },
	{ 0, 0 }
};

/* Per-board silicon delay offset fed into the static config DB */
static int a38x_silicon_delay_offset[] = {
	/* board 0 */
	0,
	/* board 1 */
	0,
	/* board 2 */
	0
};
#endif
/*
 * PHY bandwidth selection per DDR frequency, indexed by enum
 * hws_ddr_freq; returned by ddr3_tip_a38x_get_freq_config() (a 0xff
 * entry would mark the frequency unsupported - none here do).
 */
static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
	0x3,			/* DDR_FREQ_100 */
	0x4,			/* DDR_FREQ_400 */
	0x4,			/* DDR_FREQ_533 */
	0x5,			/* DDR_FREQ_667 */
	0x5,			/* DDR_FREQ_800 */
	0x5,			/* DDR_FREQ_933 */
	0x5,			/* DDR_FREQ_1066 */
	0x3,			/* DDR_FREQ_311 */
	0x3,			/* DDR_FREQ_333 */
	0x4,			/* DDR_FREQ_467 */
	0x5,			/* DDR_FREQ_850 */
	0x5,			/* DDR_FREQ_600 */
	0x3,			/* DDR_FREQ_300 */
	0x5,			/* DDR_FREQ_900 */
	0x3,			/* DDR_FREQ_360 */
	0x5			/* DDR_FREQ_1000 */
};
/*
 * PHY rate selection per DDR frequency, indexed by enum hws_ddr_freq;
 * companion table to a38x_bw_per_freq, also consumed by
 * ddr3_tip_a38x_get_freq_config().
 */
static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
	 /*TBD*/ 0x1,		/* DDR_FREQ_100 */
	0x2,			/* DDR_FREQ_400 */
	0x2,			/* DDR_FREQ_533 */
	0x2,			/* DDR_FREQ_667 */
	0x2,			/* DDR_FREQ_800 */
	0x3,			/* DDR_FREQ_933 */
	0x3,			/* DDR_FREQ_1066 */
	0x1,			/* DDR_FREQ_311 */
	0x1,			/* DDR_FREQ_333 */
	0x2,			/* DDR_FREQ_467 */
	0x2,			/* DDR_FREQ_850 */
	0x2,			/* DDR_FREQ_600 */
	0x1,			/* DDR_FREQ_300 */
	0x2,			/* DDR_FREQ_900 */
	0x1,			/* DDR_FREQ_360 */
	0x2			/* DDR_FREQ_1000 */
};
/*
 * VCO frequency (MHz) per sample-at-reset value; indexed by the
 * RST2_CPU_DDR_CLOCK_SELECT_IN field of SAR1 and used by
 * ddr3_tip_a38x_set_divider() to derive the PLL clock divider.
 * NOTE(review): the 4000 at index 21 stands out between 2000 and
 * 2132 - confirm against the Armada 38x SAR documentation.
 */
static u16 a38x_vco_freq_per_sar[] = {
	666,			/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,			/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,			/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};
/* Mask of DFX pipes addressed when access type is ACCESS_TYPE_MULTICAST */
u32 pipe_multicast_mask;

/*
 * DQ-bit to PHY-pin swizzle, 8 entries per byte lane (5 lanes);
 * registered with the training IP via ddr3_tip_register_dq_table().
 */
u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};
  181. static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
  182. enum hws_ddr_freq freq);
  183. /*
  184. * Read temperature TJ value
  185. */
  186. u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
  187. {
  188. int reg = 0;
  189. /* Initiates TSEN hardware reset once */
  190. if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
  191. reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
  192. mdelay(10);
  193. /* Check if the readout field is valid */
  194. if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
  195. printf("%s: TSEN not ready\n", __func__);
  196. return 0;
  197. }
  198. reg = reg_read(TSEN_STATUS_REG);
  199. reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
  200. return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
  201. }
  202. /*
  203. * Name: ddr3_tip_a38x_get_freq_config.
  204. * Desc:
  205. * Args:
  206. * Notes:
  207. * Returns: MV_OK if success, other error code if fail.
  208. */
  209. int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
  210. struct hws_tip_freq_config_info
  211. *freq_config_info)
  212. {
  213. if (a38x_bw_per_freq[freq] == 0xff)
  214. return MV_NOT_SUPPORTED;
  215. if (freq_config_info == NULL)
  216. return MV_BAD_PARAM;
  217. freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
  218. freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
  219. freq_config_info->is_supported = 1;
  220. return MV_OK;
  221. }
  222. /*
  223. * Name: ddr3_tip_a38x_pipe_enable.
  224. * Desc:
  225. * Args:
  226. * Notes:
  227. * Returns: MV_OK if success, other error code if fail.
  228. */
  229. int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
  230. u32 if_id, int enable)
  231. {
  232. u32 data_value, pipe_enable_mask = 0;
  233. if (enable == 0) {
  234. pipe_enable_mask = 0;
  235. } else {
  236. if (interface_access == ACCESS_TYPE_MULTICAST)
  237. pipe_enable_mask = pipe_multicast_mask;
  238. else
  239. pipe_enable_mask = (1 << interface_map[if_id].pipe);
  240. }
  241. CHECK_STATUS(ddr3_tip_reg_read
  242. (dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
  243. data_value = (data_value & (~0xff)) | pipe_enable_mask;
  244. CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));
  245. return MV_OK;
  246. }
  247. /*
  248. * Name: ddr3_tip_a38x_if_write.
  249. * Desc:
  250. * Args:
  251. * Notes:
  252. * Returns: MV_OK if success, other error code if fail.
  253. */
  254. int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
  255. u32 if_id, u32 reg_addr, u32 data_value,
  256. u32 mask)
  257. {
  258. u32 ui_data_read;
  259. if (mask != MASK_ALL_BITS) {
  260. CHECK_STATUS(ddr3_tip_a38x_if_read
  261. (dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
  262. &ui_data_read, MASK_ALL_BITS));
  263. data_value = (ui_data_read & (~mask)) | (data_value & mask);
  264. }
  265. reg_write(reg_addr, data_value);
  266. return MV_OK;
  267. }
  268. /*
  269. * Name: ddr3_tip_a38x_if_read.
  270. * Desc:
  271. * Args:
  272. * Notes:
  273. * Returns: MV_OK if success, other error code if fail.
  274. */
  275. int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
  276. u32 if_id, u32 reg_addr, u32 *data, u32 mask)
  277. {
  278. *data = reg_read(reg_addr) & mask;
  279. return MV_OK;
  280. }
  281. /*
  282. * Name: ddr3_tip_a38x_select_ddr_controller.
  283. * Desc: Enable/Disable access to Marvell's server.
  284. * Args: dev_num - device number
  285. * enable - whether to enable or disable the server
  286. * Notes:
  287. * Returns: MV_OK if success, other error code if fail.
  288. */
  289. int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
  290. {
  291. u32 reg;
  292. reg = reg_read(CS_ENABLE_REG);
  293. if (enable)
  294. reg |= (1 << 6);
  295. else
  296. reg &= ~(1 << 6);
  297. reg_write(CS_ENABLE_REG, reg);
  298. return MV_OK;
  299. }
/*
 * Name:	ddr3_tip_init_a38x_silicon.
 * Desc:	init Training SW DB.
 * Args:	dev_num - device number
 *		board_id - board index (selects static-algo delay tables)
 * Notes:	Registers the A38x callbacks with the training IP, then
 *		programs the global tuning-mask and delay state according
 *		to the frequency sampled at reset. Mutates several
 *		module-wide globals (rl_version, mask_tune_func, ck_delay,
 *		init_freq, ...) - ordering here matters.
 * Returns:	MV_OK if success, other error code if fail.
 */
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;
	enum hws_ddr_freq ddr_freq;
	int status;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* new read leveling version */
	/* Hook the A38x register/frequency accessors into the training IP */
	config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
	config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;

	ddr3_tip_init_config_func(dev_num, &config_func);

	/* Register the DQ-bit to PHY-pin swizzle table */
	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

#ifdef STATIC_ALGO_SUPPORT
	{
		struct hws_tip_static_config_info static_config;
		/* Offset into the per-board round-trip delay table */
		u32 board_offset =
		    board_id * A38X_NUMBER_OF_INTERFACES *
		    tm->num_of_bus_per_interface;

		static_config.silicon_delay =
			a38x_silicon_delay_offset[board_id];
		static_config.package_trace_arr =
			a38x_package_round_trip_delay_array;
		static_config.board_trace_arr =
			&a38x_board_round_trip_delay_array[board_offset];
		ddr3_tip_init_static_config_db(dev_num, &static_config);
	}
#endif
	status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
	if (MV_OK != status) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("DDR3 silicon get target frequency - FAILED 0x%x\n",
				       status));
		return status;
	}

	rl_version = 1;
	/* Default training-stage mask (LOAD_PATTERN_2 deliberately off) */
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  /* LOAD_PATTERN_2_MASK_BIT | */
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	/* Reduced stage set when the target frequency itself is 333/400 */
	if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (1 == ddr3_if_ecc_enabled()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}

	/* -1 means "not set by the board"; fall back to the default 160 */
	if (ck_delay == -1)
		ck_delay = 160;
	if (ck_delay_16 == -1)
		ck_delay_16 = 160;
	ca_delay = 0;
	delay_enable = 1;
	calibration_update_control = 1;

	init_freq = tm->interface_params[first_active_if].memory_freq;

	/* Result intentionally unchecked - medium_freq keeps its value
	 * on failure; NOTE(review): confirm that is acceptable here.
	 */
	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

	return MV_OK;
}
  392. int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
  393. {
  394. u32 if_id = 0;
  395. enum hws_ddr_freq freq;
  396. ddr3_tip_a38x_get_init_freq(dev_num, &freq);
  397. tm->interface_params[if_id].memory_freq = freq;
  398. /*
  399. * re-calc topology parameters according to topology updates
  400. * (if needed)
  401. */
  402. CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));
  403. return MV_OK;
  404. }
  405. int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
  406. {
  407. struct hws_topology_map *tm = ddr3_get_topology_map();
  408. if (NULL == tm)
  409. return MV_FAIL;
  410. ddr3_a38x_update_topology_map(dev_num, tm);
  411. ddr3_tip_init_a38x_silicon(dev_num, board_id);
  412. return MV_OK;
  413. }
  414. int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
  415. {
  416. u32 reg;
  417. /* Read sample at reset setting */
  418. reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
  419. RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
  420. RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
  421. switch (reg) {
  422. case 0x0:
  423. case 0x1:
  424. *freq = DDR_FREQ_333;
  425. break;
  426. case 0x2:
  427. case 0x3:
  428. *freq = DDR_FREQ_400;
  429. break;
  430. case 0x4:
  431. case 0xd:
  432. *freq = DDR_FREQ_533;
  433. break;
  434. case 0x6:
  435. *freq = DDR_FREQ_600;
  436. break;
  437. case 0x8:
  438. case 0x11:
  439. case 0x14:
  440. *freq = DDR_FREQ_667;
  441. break;
  442. case 0xc:
  443. case 0x15:
  444. case 0x1b:
  445. *freq = DDR_FREQ_800;
  446. break;
  447. case 0x10:
  448. *freq = DDR_FREQ_933;
  449. break;
  450. case 0x12:
  451. *freq = DDR_FREQ_900;
  452. break;
  453. case 0x13:
  454. *freq = DDR_FREQ_900;
  455. break;
  456. default:
  457. *freq = 0;
  458. return MV_NOT_SUPPORTED;
  459. }
  460. return MV_OK;
  461. }
  462. int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
  463. {
  464. u32 reg;
  465. /* Read sample at reset setting */
  466. reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
  467. RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
  468. RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
  469. switch (reg) {
  470. case 0x0:
  471. case 0x1:
  472. /* Medium is same as TF to run PBS in this freq */
  473. *freq = DDR_FREQ_333;
  474. break;
  475. case 0x2:
  476. case 0x3:
  477. /* Medium is same as TF to run PBS in this freq */
  478. *freq = DDR_FREQ_400;
  479. break;
  480. case 0x4:
  481. case 0xd:
  482. *freq = DDR_FREQ_533;
  483. break;
  484. case 0x8:
  485. case 0x11:
  486. case 0x14:
  487. *freq = DDR_FREQ_333;
  488. break;
  489. case 0xc:
  490. case 0x15:
  491. case 0x1b:
  492. *freq = DDR_FREQ_400;
  493. break;
  494. case 0x6:
  495. *freq = DDR_FREQ_300;
  496. break;
  497. case 0x12:
  498. *freq = DDR_FREQ_360;
  499. break;
  500. case 0x13:
  501. *freq = DDR_FREQ_400;
  502. break;
  503. default:
  504. *freq = 0;
  505. return MV_NOT_SUPPORTED;
  506. }
  507. return MV_OK;
  508. }
  509. u32 ddr3_tip_get_init_freq(void)
  510. {
  511. enum hws_ddr_freq freq;
  512. ddr3_tip_a38x_get_init_freq(0, &freq);
  513. return freq;
  514. }
/*
 * Program the CPU PLL clock divider so the D-unit runs at the
 * requested DDR frequency, then select 1:1 or 2:1 training-clock
 * mode depending on whether the frequency is at most 400 MHz.
 * The register write sequence (reset-mask, smooth reload, relax,
 * divider, ratio-reload pulse, cleanup) is order-sensitive.
 * Only interface 0 is supported on A38x.
 * Returns MV_OK on success, MV_BAD_PARAM for if_id != 0, or a
 * propagated register-access error.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum hws_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val;

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
	/* Integer divider from VCO frequency down to the target freq */
	divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];

	/* Set Sync mode */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
		      0x1000));
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
		      0x200));

	/* cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
		      0xff));

	/* cpupll_clkdiv_reload_smooth */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 8), (0xff << 8)));

	/* cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 24), (0xff << 24)));

	/* write the divider */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
		      (divider << 8), (0x3f << 8)));

	/* set cpupll_clkdiv_reload_ratio */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
		      (1 << 8), (1 << 8)));

	/* unset cpupll_clkdiv_reload_ratio */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      (1 << 8)));

	/* clear cpupll_clkdiv_reload_force */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 8)));

	/* clear cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 24)));

	/* clear cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      0xff));

	/* Dunit training clock + 1:1 mode */
	if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      (1 << 16), (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (0 << 15), (1 << 15)));
	} else {
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      0, (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (1 << 15), (1 << 15)));
	}

	return MV_OK;
}
  592. /*
  593. * external read from memory
  594. */
  595. int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
  596. u32 num_of_bursts, u32 *data)
  597. {
  598. u32 burst_num;
  599. for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
  600. data[burst_num] = readl(reg_addr + 4 * burst_num);
  601. return MV_OK;
  602. }
  603. /*
  604. * external write to memory
  605. */
  606. int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
  607. u32 num_of_bursts, u32 *data) {
  608. u32 burst_num;
  609. for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
  610. writel(data[burst_num], reg_addr + 4 * burst_num);
  611. return MV_OK;
  612. }
/*
 * Hook run before the training algorithm; delegates straight to the
 * generic silicon init.
 */
int ddr3_silicon_pre_init(void)
{
	return ddr3_silicon_init();
}
/* Hook run after the training algorithm; nothing to do on A38x. */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
  621. int ddr3_silicon_post_init(void)
  622. {
  623. struct hws_topology_map *tm = ddr3_get_topology_map();
  624. /* Set half bus width */
  625. if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
  626. CHECK_STATUS(ddr3_tip_if_write
  627. (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
  628. REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
  629. }
  630. return MV_OK;
  631. }
  632. int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
  633. {
  634. info_ptr->device_id = 0x6800;
  635. info_ptr->ck_delay = ck_delay;
  636. return MV_OK;
  637. }