ddr3_a38x.c

/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>

#include "ddr3_init.h"

#define A38X_NUMBER_OF_INTERFACES 5

#define SAR_DEV_ID_OFFS 27
#define SAR_DEV_ID_MASK 0x7

/* Thermal Sensor Registers */
#define TSEN_STATE_REG 0xe4070
#define TSEN_STATE_OFFSET 31
#define TSEN_STATE_MASK (0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG 0xe4074
#define TSEN_CONF_RST_OFFSET 8
#define TSEN_CONF_RST_MASK (0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG 0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET 10
#define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
        TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET 0
#define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)

static struct dfx_access interface_map[] = {
        /* Pipe Client */
        { 0, 17 },
        { 1, 7 },
        { 1, 11 },
        { 0, 3 },
        { 1, 25 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 }
};

/* This array holds the board round trip delay (DQ and CK) per <interface, bus> */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
        /* 1st board */
        /* Interface bus DQS-delay CK-delay */
        { 3952, 5060 },
        { 3192, 4493 },
        { 4785, 6677 },
        { 3413, 7267 },
        { 4282, 6086 }, /* ECC PUP */
        { 3952, 5134 },
        { 3192, 4567 },
        { 4785, 6751 },
        { 3413, 7341 },
        { 4282, 6160 }, /* ECC PUP */

        /* 2nd board */
        /* Interface bus DQS-delay CK-delay */
        { 3952, 5060 },
        { 3192, 4493 },
        { 4785, 6677 },
        { 3413, 7267 },
        { 4282, 6086 }, /* ECC PUP */
        { 3952, 5134 },
        { 3192, 4567 },
        { 4785, 6751 },
        { 3413, 7341 },
        { 4282, 6160 }  /* ECC PUP */
};

#ifdef STATIC_ALGO_SUPPORT
/* package trace */
static struct trip_delay_element a38x_package_round_trip_delay_array[] = {
        /* IF BUS DQ_DELAY CK_DELAY */
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 },
        { 0, 0 }
};

static int a38x_silicon_delay_offset[] = {
        /* board 0 */
        0,
        /* board 1 */
        0,
        /* board 2 */
        0
};
#endif

static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
        0x3, /* DDR_FREQ_100 */
        0x4, /* DDR_FREQ_400 */
        0x4, /* DDR_FREQ_533 */
        0x5, /* DDR_FREQ_667 */
        0x5, /* DDR_FREQ_800 */
        0x5, /* DDR_FREQ_933 */
        0x5, /* DDR_FREQ_1066 */
        0x3, /* DDR_FREQ_311 */
        0x3, /* DDR_FREQ_333 */
        0x4, /* DDR_FREQ_467 */
        0x5, /* DDR_FREQ_850 */
        0x5, /* DDR_FREQ_600 */
        0x3, /* DDR_FREQ_300 */
        0x5, /* DDR_FREQ_900 */
        0x3, /* DDR_FREQ_360 */
        0x5  /* DDR_FREQ_1000 */
};

static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
        /*TBD*/ 0x1, /* DDR_FREQ_100 */
        0x2, /* DDR_FREQ_400 */
        0x2, /* DDR_FREQ_533 */
        0x2, /* DDR_FREQ_667 */
        0x2, /* DDR_FREQ_800 */
        0x3, /* DDR_FREQ_933 */
        0x3, /* DDR_FREQ_1066 */
        0x1, /* DDR_FREQ_311 */
        0x1, /* DDR_FREQ_333 */
        0x2, /* DDR_FREQ_467 */
        0x2, /* DDR_FREQ_850 */
        0x2, /* DDR_FREQ_600 */
        0x1, /* DDR_FREQ_300 */
        0x2, /* DDR_FREQ_900 */
        0x1, /* DDR_FREQ_360 */
        0x2  /* DDR_FREQ_1000 */
};
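
/*
 * VCO frequency table indexed by the CPU/DDR clock-select field of the
 * sample-at-reset register. ddr3_tip_a38x_set_divider() divides the entry
 * by freq_val[] of the target frequency to obtain the clock divider; the
 * values are assumed here to be in MHz.
 */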
static u16 a38x_vco_freq_per_sar[] = {
        666,  /* 0 */
        1332,
        800,
        1600,
        1066,
        2132,
        1200,
        2400,
        1332,
        1332,
        1500,
        1500,
        1600, /* 12 */
        1600,
        1700,
        1700,
        1866,
        1866,
        1800, /* 18 */
        2000,
        2000,
        4000,
        2132,
        2132,
        2300,
        2300,
        2400,
        2400,
        2500,
        2500,
        800
};

u32 pipe_multicast_mask;
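
/*
 * DQ-bit to PHY-pin swizzle map: each row of eight entries describes one
 * byte lane (PUP). Five rows are provided, which lines up with the four
 * data PUPs plus the ECC PUP referenced elsewhere in this file.
 */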
u32 dq_bit_map_2_phy_pin[] = {
        1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
        8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
        3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
        1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
        0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
};

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
                                     enum hws_ddr_freq freq);

/*
 * Read temperature TJ value
 */
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
        int reg = 0;

        /* Initiates TSEN hardware reset once */
        if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
                reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
        mdelay(10);

        /* Check if the readout field is valid */
        if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
                printf("%s: TSEN not ready\n", __func__);
                return 0;
        }

        reg = reg_read(TSEN_STATUS_REG);
        reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
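
        /*
         * Convert the raw readout to degrees Celsius using the integer
         * constants below: T = readout * 10000 / 21445 - 272.674, i.e.
         * roughly readout / 2.14 minus an offset of about 273.
         */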
        return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}

/*
 * Name: ddr3_tip_a38x_get_freq_config.
 * Desc: Return the per-frequency bandwidth/rate configuration.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
                                  struct hws_tip_freq_config_info
                                  *freq_config_info)
{
        if (a38x_bw_per_freq[freq] == 0xff)
                return MV_NOT_SUPPORTED;

        if (freq_config_info == NULL)
                return MV_BAD_PARAM;

        freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
        freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
        freq_config_info->is_supported = 1;

        return MV_OK;
}

/*
 * Name: ddr3_tip_a38x_pipe_enable.
 * Desc: Enable or disable the DFX pipe(s) for the given interface.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
                              u32 if_id, int enable)
{
        u32 data_value, pipe_enable_mask = 0;

        if (enable == 0) {
                pipe_enable_mask = 0;
        } else {
                if (interface_access == ACCESS_TYPE_MULTICAST)
                        pipe_enable_mask = pipe_multicast_mask;
                else
                        pipe_enable_mask = (1 << interface_map[if_id].pipe);
        }
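
        /*
         * Only the low byte of PIPE_ENABLE_ADDR carries the per-pipe enable
         * bits; read-modify-write so the remaining bits are preserved.
         */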
        CHECK_STATUS(ddr3_tip_reg_read
                     (dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
        data_value = (data_value & (~0xff)) | pipe_enable_mask;
        CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));

        return MV_OK;
}

/*
 * Name: ddr3_tip_a38x_if_write.
 * Desc: Masked 32-bit register write for the DDR training interface.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
                           u32 if_id, u32 reg_addr, u32 data_value,
                           u32 mask)
{
        u32 ui_data_read;

        if (mask != MASK_ALL_BITS) {
                CHECK_STATUS(ddr3_tip_a38x_if_read
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
                              &ui_data_read, MASK_ALL_BITS));
                data_value = (ui_data_read & (~mask)) | (data_value & mask);
        }

        reg_write(reg_addr, data_value);

        return MV_OK;
}

/*
 * Name: ddr3_tip_a38x_if_read.
 * Desc: Masked 32-bit register read for the DDR training interface.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
                          u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
        *data = reg_read(reg_addr) & mask;

        return MV_OK;
}

/*
 * Name: ddr3_tip_a38x_select_ddr_controller.
 * Desc: Enable/Disable access to Marvell's server.
 * Args: dev_num - device number
 *       enable - whether to enable or disable the server
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
        u32 reg;

        reg = reg_read(CS_ENABLE_REG);

        if (enable)
                reg |= (1 << 6);
        else
                reg &= ~(1 << 6);

        reg_write(CS_ENABLE_REG, reg);

        return MV_OK;
}

/*
 * Name: ddr3_tip_init_a38x_silicon.
 * Desc: init Training SW DB.
 * Args:
 * Notes:
 * Returns: MV_OK if success, other error code if fail.
 */
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
        struct hws_tip_config_func_db config_func;
        enum hws_ddr_freq ddr_freq;
        int status;
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* new read leveling version */
        config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
        config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
        config_func.tip_dunit_mux_select_func =
                ddr3_tip_a38x_select_ddr_controller;
        config_func.tip_get_freq_config_info_func =
                ddr3_tip_a38x_get_freq_config;
        config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
        config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
        config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;

        ddr3_tip_init_config_func(dev_num, &config_func);

        ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

#ifdef STATIC_ALGO_SUPPORT
        {
                struct hws_tip_static_config_info static_config;
                u32 board_offset =
                        board_id * A38X_NUMBER_OF_INTERFACES *
                        tm->num_of_bus_per_interface;

                static_config.silicon_delay =
                        a38x_silicon_delay_offset[board_id];
                static_config.package_trace_arr =
                        a38x_package_round_trip_delay_array;
                static_config.board_trace_arr =
                        &a38x_board_round_trip_delay_array[board_offset];
                ddr3_tip_init_static_config_db(dev_num, &static_config);
        }
#endif
        status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
        if (MV_OK != status) {
                DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
                                      ("DDR3 silicon get target frequency - FAILED 0x%x\n",
                                       status));
                return status;
        }

        rl_version = 1;
        mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
                          LOAD_PATTERN_MASK_BIT |
                          SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
                          /* LOAD_PATTERN_2_MASK_BIT | */
                          WRITE_LEVELING_SUPP_MASK_BIT |
                          READ_LEVELING_MASK_BIT |
                          PBS_RX_MASK_BIT |
                          PBS_TX_MASK_BIT |
                          SET_TARGET_FREQ_MASK_BIT |
                          WRITE_LEVELING_TF_MASK_BIT |
                          WRITE_LEVELING_SUPP_TF_MASK_BIT |
                          READ_LEVELING_TF_MASK_BIT |
                          CENTRALIZATION_RX_MASK_BIT |
                          CENTRALIZATION_TX_MASK_BIT);
        rl_mid_freq_wa = 1;

        if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
                mask_tune_func = (WRITE_LEVELING_MASK_BIT |
                                  LOAD_PATTERN_2_MASK_BIT |
                                  WRITE_LEVELING_SUPP_MASK_BIT |
                                  READ_LEVELING_MASK_BIT |
                                  PBS_RX_MASK_BIT |
                                  PBS_TX_MASK_BIT |
                                  CENTRALIZATION_RX_MASK_BIT |
                                  CENTRALIZATION_TX_MASK_BIT);
                rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
        }

        /* Supplementary not supported for ECC modes */
        if (1 == ddr3_if_ecc_enabled()) {
                mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
                mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
                mask_tune_func &= ~PBS_TX_MASK_BIT;
                mask_tune_func &= ~PBS_RX_MASK_BIT;
        }

        if (ck_delay == -1)
                ck_delay = 160;
        if (ck_delay_16 == -1)
                ck_delay_16 = 160;
        ca_delay = 0;
        delay_enable = 1;
        calibration_update_control = 1;

        init_freq = tm->interface_params[first_active_if].memory_freq;

        ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);

        return MV_OK;
}

int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
        u32 if_id = 0;
        enum hws_ddr_freq freq;

        ddr3_tip_a38x_get_init_freq(dev_num, &freq);
        tm->interface_params[if_id].memory_freq = freq;

        /*
         * re-calc topology parameters according to topology updates
         * (if needed)
         */
        CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));

        return MV_OK;
}

int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
{
        struct hws_topology_map *tm = ddr3_get_topology_map();

        if (NULL == tm)
                return MV_FAIL;

        ddr3_a38x_update_topology_map(dev_num, tm);
        ddr3_tip_init_a38x_silicon(dev_num, board_id);

        return MV_OK;
}
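
/*
 * Decode the CPU/DDR clock-select field of the sample-at-reset register
 * into the target (init) DDR frequency.
 */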
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
{
        u32 reg;

        /* Read sample at reset setting */
        reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
               RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        switch (reg) {
        case 0x0:
        case 0x1:
                *freq = DDR_FREQ_333;
                break;
        case 0x2:
        case 0x3:
                *freq = DDR_FREQ_400;
                break;
        case 0x4:
        case 0xd:
                *freq = DDR_FREQ_533;
                break;
        case 0x6:
                *freq = DDR_FREQ_600;
                break;
        case 0x8:
        case 0x11:
        case 0x14:
                *freq = DDR_FREQ_667;
                break;
        case 0xc:
        case 0x15:
        case 0x1b:
                *freq = DDR_FREQ_800;
                break;
        case 0x10:
                *freq = DDR_FREQ_933;
                break;
        case 0x12:
                *freq = DDR_FREQ_900;
                break;
        case 0x13:
                *freq = DDR_FREQ_900;
                break;
        default:
                *freq = 0;
                return MV_NOT_SUPPORTED;
        }

        return MV_OK;
}
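
/*
 * Decode the same clock-select field into the intermediate ("medium")
 * frequency used during training; for the lower target frequencies the
 * medium frequency equals the target so that PBS can run there.
 */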
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
        u32 reg;

        /* Read sample at reset setting */
        reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
               RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        switch (reg) {
        case 0x0:
        case 0x1:
                /* Medium is same as TF to run PBS in this freq */
                *freq = DDR_FREQ_333;
                break;
        case 0x2:
        case 0x3:
                /* Medium is same as TF to run PBS in this freq */
                *freq = DDR_FREQ_400;
                break;
        case 0x4:
        case 0xd:
                *freq = DDR_FREQ_533;
                break;
        case 0x8:
        case 0x11:
        case 0x14:
                *freq = DDR_FREQ_333;
                break;
        case 0xc:
        case 0x15:
        case 0x1b:
                *freq = DDR_FREQ_400;
                break;
        case 0x6:
                *freq = DDR_FREQ_300;
                break;
        case 0x12:
                *freq = DDR_FREQ_360;
                break;
        case 0x13:
                *freq = DDR_FREQ_400;
                break;
        default:
                *freq = 0;
                return MV_NOT_SUPPORTED;
        }

        return MV_OK;
}

u32 ddr3_tip_get_init_freq(void)
{
        enum hws_ddr_freq freq;

        ddr3_tip_a38x_get_init_freq(0, &freq);

        return freq;
}
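
/*
 * Program the CPU PLL clock divider so the DDR interface runs at the
 * requested frequency: look up the VCO frequency selected by the
 * sample-at-reset field, compute divider = VCO / target, walk the
 * cpupll_clkdiv reload sequence noted in the register comments below, and
 * finally switch the Dunit to the training clock / 1:1 mode for low target
 * frequencies (freq_val <= 400).
 */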
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
                                     enum hws_ddr_freq frequency)
{
        u32 divider = 0;
        u32 sar_val;

        if (if_id != 0) {
                DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
                                      ("A38x does not support interface 0x%x\n",
                                       if_id));
                return MV_BAD_PARAM;
        }

        /* get VCO freq index */
        sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
                   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
                RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
        divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];

        /* Set Sync mode */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
                      0x1000));
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
                      0x200));

        /* cpupll_clkdiv_reset_mask */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
                      0xff));

        /* cpupll_clkdiv_reload_smooth */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
                      (0x2 << 8), (0xff << 8)));

        /* cpupll_clkdiv_relax_en */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
                      (0x2 << 24), (0xff << 24)));

        /* write the divider */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
                      (divider << 8), (0x3f << 8)));

        /* set cpupll_clkdiv_reload_ratio */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
                      (1 << 8), (1 << 8)));

        /* unset cpupll_clkdiv_reload_ratio */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
                      (1 << 8)));

        /* clear cpupll_clkdiv_reload_force */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
                      (0xff << 8)));

        /* clear cpupll_clkdiv_relax_en */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
                      (0xff << 24)));

        /* clear cpupll_clkdiv_reset_mask */
        CHECK_STATUS(ddr3_tip_a38x_if_write
                     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
                      0xff));

        /* Dunit training clock + 1:1 mode */
        if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
                              (1 << 16), (1 << 16)));
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
                              (0 << 15), (1 << 15)));
        } else {
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
                              0, (1 << 16)));
                CHECK_STATUS(ddr3_tip_a38x_if_write
                             (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
                              (1 << 15), (1 << 15)));
        }

        return MV_OK;
}

/*
 * External read from memory: read num_of_bursts bursts of eight 32-bit
 * words starting at reg_addr into data[].
 */
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
                      u32 num_of_bursts, u32 *data)
{
        u32 burst_num;

        for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
                data[burst_num] = readl(reg_addr + 4 * burst_num);

        return MV_OK;
}

/*
 * External write to memory: write num_of_bursts bursts of eight 32-bit
 * words from data[] starting at reg_addr.
 */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
                       u32 num_of_bursts, u32 *data)
{
        u32 burst_num;

        for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
                writel(data[burst_num], reg_addr + 4 * burst_num);

        return MV_OK;
}

int ddr3_silicon_pre_init(void)
{
        int result;

        result = ddr3_silicon_init();

        return result;
}

int ddr3_post_run_alg(void)
{
        return MV_OK;
}

int ddr3_silicon_post_init(void)
{
        struct hws_topology_map *tm = ddr3_get_topology_map();

        /* Set half bus width */
        if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
                CHECK_STATUS(ddr3_tip_if_write
                             (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
                              REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
        }

        return MV_OK;
}

int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
        info_ptr->device_id = 0x6800;
        info_ptr->ck_delay = ck_delay;

        return MV_OK;
}