ddr3_training_static.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538
  1. /*
  2. * Copyright (C) Marvell International Ltd. and its affiliates
  3. *
  4. * SPDX-License-Identifier: GPL-2.0
  5. */
  6. #include <common.h>
  7. #include <spl.h>
  8. #include <asm/io.h>
  9. #include <asm/arch/cpu.h>
  10. #include <asm/arch/soc.h>
  11. #include "ddr3_init.h"
/* Design Guidelines parameters */
u32 g_zpri_data = 123; /* controller data - P drive strength */
u32 g_znri_data = 123; /* controller data - N drive strength */
u32 g_zpri_ctrl = 74; /* controller C/A - P drive strength */
u32 g_znri_ctrl = 74; /* controller C/A - N drive strength */
u32 g_zpodt_data = 45; /* controller data - P ODT */
u32 g_znodt_data = 45; /* controller data - N ODT */
u32 g_zpodt_ctrl = 45; /* controller C/A - P ODT */
u32 g_znodt_ctrl = 45; /* controller C/A - N ODT */
u32 g_odt_config = 0x120012; /* ODT configuration register value */
u32 g_rtt_nom = 0x44; /* DRAM Rtt_nom (nominal termination) setting */
u32 g_dic = 0x2; /* DRAM output driver impedance control setting */
#ifdef STATIC_ALGO_SUPPORT
/* placeholder value for access-type arguments that are ignored */
#define PARAM_NOT_CARE 0
#define MAX_STATIC_SEQ 48
/* per-device extra silicon delay (psec), registered via the static config DB */
u32 silicon_delay[HWS_MAX_DEVICE_NUM];
/* per-device board/package trace-length tables, registered by the caller */
struct hws_tip_static_config_info static_config[HWS_MAX_DEVICE_NUM];
/* per-device register init table; terminated by an entry with reg_addr == 0 */
static reg_data *static_init_controller_config[HWS_MAX_DEVICE_NUM];
/* debug delay in write leveling */
int wl_debug_delay = 0;
/* pup register #3 for functional board */
int function_reg_value = 8;
u32 silicon;
/* FIFO read-ready delay to add per read-leveling phase (indexed by phase) */
u32 read_ready_delay_phase_offset[] = { 4, 4, 4, 4, 6, 6, 6, 6 };
/*
 * Maps a 4-bit chip-select bitmask to {cs_num, num_cs}.
 * Indexed by the cs_bitmask value (0..15). cs_num is the chip-select
 * number and is only meaningful when exactly one bit is set in the
 * bitmask (entries 1, 2, 4, 8); num_cs is the number of set bits.
 */
static struct cs_element chip_select_map[] = {
	/* CS Value (single only) Num_CS */
	{0, 0},
	{0, 1},
	{1, 1},
	{0, 2},
	{2, 1},
	{0, 2},
	{0, 2},
	{0, 3},
	{3, 1},
	{0, 2},
	{0, 2},
	{0, 3},
	{0, 2},
	{0, 3},
	{0, 3},
	{0, 4}
};
  55. /*
  56. * Register static init controller DB
  57. */
  58. int ddr3_tip_init_specific_reg_config(u32 dev_num, reg_data *reg_config_arr)
  59. {
  60. static_init_controller_config[dev_num] = reg_config_arr;
  61. return MV_OK;
  62. }
  63. /*
  64. * Register static info DB
  65. */
  66. int ddr3_tip_init_static_config_db(
  67. u32 dev_num, struct hws_tip_static_config_info *static_config_info)
  68. {
  69. static_config[dev_num].board_trace_arr =
  70. static_config_info->board_trace_arr;
  71. static_config[dev_num].package_trace_arr =
  72. static_config_info->package_trace_arr;
  73. silicon_delay[dev_num] = static_config_info->silicon_delay;
  74. return MV_OK;
  75. }
/*
 * Static round trip flow - Calculates the total round trip delay.
 *
 * Fills round_trip_delay_arr (one entry per global bus, i.e.
 * if_id * buses-per-interface + bus) with the board + package trace
 * delay in psec. For write leveling (is_wl != 0) the DQS trace is
 * subtracted from the clock trace (we need the CK-to-DQS difference);
 * for read leveling they are summed (full round trip), and the
 * per-device silicon delay is added instead of the WL debug delay.
 */
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
					 struct trip_delay_element *table_ptr,
					 int is_wl, u32 *round_trip_delay_arr)
{
	u32 bus_index, global_bus;
	u32 if_id;
	u32 bus_per_interface;
	int sign;
	u32 temp;
	u32 board_trace;
	struct trip_delay_element *pkg_delay_ptr;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * In WL we calc the diff between Clock to DQs in RL we sum the round
	 * trip of Clock and DQs
	 */
	sign = (is_wl) ? -1 : 1;
	bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* skips inactive interfaces (macro 'continue's) */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_index = 0; bus_index < bus_per_interface;
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			global_bus = (if_id * bus_per_interface) + bus_index;

			/* calculate total trip delay (package and board) */
			board_trace = (table_ptr[global_bus].dqs_delay * sign) +
				table_ptr[global_bus].ck_delay;
			/*
			 * Convert trace length to psec; the 163/1000 factor
			 * is the propagation delay per length unit.
			 */
			temp = (board_trace * 163) / 1000;

			/* Convert the length to delay in psec units */
			pkg_delay_ptr =
				static_config[dev_num].package_trace_arr;
			/*
			 * NOTE(review): board/package entries are u32 but are
			 * combined through (int) casts so the WL case can
			 * subtract; assumes delays are small enough to fit in
			 * int — verify against the trace tables.
			 */
			round_trip_delay_arr[global_bus] = temp +
				(int)(pkg_delay_ptr[global_bus].dqs_delay *
				      sign) +
				(int)pkg_delay_ptr[global_bus].ck_delay +
				(int)((is_wl == 1) ? wl_debug_delay :
				      (int)silicon_delay[dev_num]);
			DEBUG_TRAINING_STATIC_IP(
				DEBUG_LEVEL_TRACE,
				("Round Trip Build round_trip_delay_arr[0x%x]: 0x%x temp 0x%x\n",
				 global_bus, round_trip_delay_arr[global_bus],
				 temp));
		}
	}

	return MV_OK;
}
  125. /*
  126. * Write leveling for static flow - calculating the round trip delay of the
  127. * DQS signal.
  128. */
  129. int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id,
  130. enum hws_ddr_freq frequency,
  131. u32 *round_trip_delay_arr)
  132. {
  133. u32 bus_index; /* index to the bus loop */
  134. u32 bus_start_index;
  135. u32 bus_per_interface;
  136. u32 phase = 0;
  137. u32 adll = 0, adll_cen, adll_inv, adll_final;
  138. u32 adll_period = MEGA / freq_val[frequency] / 64;
  139. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  140. ("ddr3_tip_write_leveling_static_config\n"));
  141. DEBUG_TRAINING_STATIC_IP(
  142. DEBUG_LEVEL_TRACE,
  143. ("dev_num 0x%x IF 0x%x freq %d (adll_period 0x%x)\n",
  144. dev_num, if_id, frequency, adll_period));
  145. bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
  146. bus_start_index = if_id * bus_per_interface;
  147. for (bus_index = bus_start_index;
  148. bus_index < (bus_start_index + bus_per_interface); bus_index++) {
  149. VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
  150. phase = round_trip_delay_arr[bus_index] / (32 * adll_period);
  151. adll = (round_trip_delay_arr[bus_index] -
  152. (phase * 32 * adll_period)) / adll_period;
  153. adll = (adll > 31) ? 31 : adll;
  154. adll_cen = 16 + adll;
  155. adll_inv = adll_cen / 32;
  156. adll_final = adll_cen - (adll_inv * 32);
  157. adll_final = (adll_final > 31) ? 31 : adll_final;
  158. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  159. ("\t%d - phase 0x%x adll 0x%x\n",
  160. bus_index, phase, adll));
  161. /*
  162. * Writing to all 4 phy of Interface number,
  163. * bit 0 \96 4 \96 ADLL, bit 6-8 phase
  164. */
  165. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  166. (dev_num, ACCESS_TYPE_UNICAST, if_id,
  167. (bus_index % 4), DDR_PHY_DATA,
  168. PHY_WRITE_DELAY(cs),
  169. ((phase << 6) + (adll & 0x1f)), 0x1df));
  170. CHECK_STATUS(ddr3_tip_bus_write
  171. (dev_num, ACCESS_TYPE_UNICAST, if_id,
  172. ACCESS_TYPE_UNICAST, (bus_index % 4),
  173. DDR_PHY_DATA, WRITE_CENTRALIZATION_PHY_REG,
  174. ((adll_inv & 0x1) << 5) + adll_final));
  175. }
  176. return MV_OK;
  177. }
/*
 * Read leveling for static flow
 *
 * For each pair of buses on interface @if_id, converts the total
 * round-trip delay (psec) into a read-sample delay (SDR cycles, per
 * chip select), a phase (DDR half-cycles) and an ADLL tap value, then
 * programs the PHY read-delay registers and the interface-level
 * READ_DATA_SAMPLE_DELAY / READ_DATA_READY_DELAY registers.
 */
int ddr3_tip_read_leveling_static_config(u32 dev_num,
					 u32 if_id,
					 enum hws_ddr_freq frequency,
					 u32 *total_round_trip_delay_arr)
{
	/* data3 is never modified: register 0x3 is cleared (masked 0x1f) */
	u32 cs, data0, data1, data3 = 0;
	u32 bus_index;		/* index to the bus loop */
	u32 bus_start_index;
	u32 phase0, phase1, max_phase;
	u32 adll0, adll1;
	u32 cl_value;
	u32 min_delay;
	u32 sdr_period = MEGA / freq_val[frequency];
	u32 ddr_period = MEGA / freq_val[frequency] / 2;
	u32 adll_period = MEGA / freq_val[frequency] / 64;
	enum hws_speed_bin speed_bin_index;
	u32 rd_sample_dly[MAX_CS_NUM] = { 0 };
	u32 rd_ready_del[MAX_CS_NUM] = { 0 };
	u32 bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_read_leveling_static_config\n"));
	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("dev_num 0x%x ifc 0x%x freq %d\n", dev_num,
				  if_id, frequency));
	DEBUG_TRAINING_STATIC_IP(
		DEBUG_LEVEL_TRACE,
		("Sdr_period 0x%x Ddr_period 0x%x adll_period 0x%x\n",
		 sdr_period, ddr_period, adll_period));

	/*
	 * CAS latency: taken directly from the topology when running at
	 * the topology's memory frequency, otherwise looked up from the
	 * speed-bin table for the requested frequency.
	 */
	if (tm->interface_params[first_active_if].memory_freq ==
	    frequency) {
		cl_value = tm->interface_params[first_active_if].cas_l;
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x\n", cl_value));
	} else {
		speed_bin_index = tm->interface_params[if_id].speed_bin_index;
		cl_value = cas_latency_table[speed_bin_index].cl_val[frequency];
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x speed_bin_index %d\n",
					  cl_value, speed_bin_index));
	}

	/* buses are processed two at a time (bus_index, bus_index + 1) */
	bus_start_index = if_id * bus_per_interface;
	for (bus_index = bus_start_index;
	     bus_index < (bus_start_index + bus_per_interface);
	     bus_index += 2) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
		/* map this bus's CS bitmask to its chip-select number */
		cs = chip_select_map[
			tm->interface_params[if_id].as_bus_params[
				(bus_index % 4)].cs_bitmask].cs_num;

		/* read sample delay calculation */
		min_delay = (total_round_trip_delay_arr[bus_index] <
			     total_round_trip_delay_arr[bus_index + 1]) ?
			total_round_trip_delay_arr[bus_index] :
			total_round_trip_delay_arr[bus_index + 1];
		/* round down */
		rd_sample_dly[cs] = 2 * (min_delay / (sdr_period * 2));
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\t%d - min_delay 0x%x cs 0x%x rd_sample_dly[cs] 0x%x\n",
			 bus_index, min_delay, cs, rd_sample_dly[cs]));

		/* phase calculation */
		phase0 = (total_round_trip_delay_arr[bus_index] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		phase1 = (total_round_trip_delay_arr[bus_index + 1] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		max_phase = (phase0 > phase1) ? phase0 : phase1;
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tphase0 0x%x phase1 0x%x max_phase 0x%x\n",
			 phase0, phase1, max_phase));

		/* ADLL calculation */
		adll0 = (u32)((total_round_trip_delay_arr[bus_index] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase0)) / adll_period);
		adll0 = (adll0 > 31) ? 31 : adll0;	/* clamp to 5 bits */
		adll1 = (u32)((total_round_trip_delay_arr[bus_index + 1] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase1)) / adll_period);
		adll1 = (adll1 > 31) ? 31 : adll1;

		/* The Read delay close the Read FIFO */
		rd_ready_del[cs] = rd_sample_dly[cs] +
			read_ready_delay_phase_offset[max_phase];
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tadll0 0x%x adll1 0x%x rd_ready_del[cs] 0x%x\n",
			 adll0, adll1, rd_ready_del[cs]));

		/*
		 * Write to the phy of Interface (bit 0-4 - ADLL,
		 * bit 6-8 phase)
		 */
		data0 = ((phase0 << 6) + (adll0 & 0x1f));
		data1 = ((phase1 << 6) + (adll1 & 0x1f));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      (bus_index % 4), DDR_PHY_DATA, PHY_READ_DELAY(cs),
			      data0, 0x1df));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      ((bus_index + 1) % 4), DDR_PHY_DATA,
			      PHY_READ_DELAY(cs), data1, 0x1df));
	}

	/* clear the low 5 bits of PHY register 0x3 on every active bus */
	for (bus_index = 0; bus_index < bus_per_interface; bus_index++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      bus_index, DDR_PHY_DATA, 0x3, data3, 0x1f));
	}
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_SAMPLE_DELAY,
		      (rd_sample_dly[0] + cl_value) + (rd_sample_dly[1] << 8),
		      MASK_ALL_BITS));

	/* Read_ready_del0 bit 0-4 , CS bits 8-12 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_READY_DELAY,
		      rd_ready_del[0] + (rd_ready_del[1] << 8) + cl_value,
		      MASK_ALL_BITS));

	return MV_OK;
}
  301. /*
  302. * DDR3 Static flow
  303. */
  304. int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq)
  305. {
  306. u32 if_id = 0;
  307. struct trip_delay_element *table_ptr;
  308. u32 wl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
  309. u32 rl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
  310. struct init_cntr_param init_cntr_prm;
  311. int ret;
  312. struct hws_topology_map *tm = ddr3_get_topology_map();
  313. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  314. ("ddr3_tip_run_static_alg"));
  315. init_cntr_prm.do_mrs_phy = 1;
  316. init_cntr_prm.is_ctrl64_bit = 0;
  317. init_cntr_prm.init_phy = 1;
  318. ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
  319. if (ret != MV_OK) {
  320. DEBUG_TRAINING_STATIC_IP(
  321. DEBUG_LEVEL_ERROR,
  322. ("hws_ddr3_tip_init_controller failure\n"));
  323. }
  324. /* calculate the round trip delay for Write Leveling */
  325. table_ptr = static_config[dev_num].board_trace_arr;
  326. CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
  327. (dev_num, table_ptr, 1,
  328. wl_total_round_trip_delay_arr));
  329. /* calculate the round trip delay for Read Leveling */
  330. CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
  331. (dev_num, table_ptr, 0,
  332. rl_total_round_trip_delay_arr));
  333. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  334. /* check if the interface is enabled */
  335. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  336. /*
  337. * Static frequency is defined according to init-frequency
  338. * (not target)
  339. */
  340. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  341. ("Static IF %d freq %d\n",
  342. if_id, freq));
  343. CHECK_STATUS(ddr3_tip_write_leveling_static_config
  344. (dev_num, if_id, freq,
  345. wl_total_round_trip_delay_arr));
  346. CHECK_STATUS(ddr3_tip_read_leveling_static_config
  347. (dev_num, if_id, freq,
  348. rl_total_round_trip_delay_arr));
  349. }
  350. return MV_OK;
  351. }
  352. /*
  353. * Init controller for static flow
  354. */
  355. int ddr3_tip_static_init_controller(u32 dev_num)
  356. {
  357. u32 index_cnt = 0;
  358. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  359. ("ddr3_tip_static_init_controller\n"));
  360. while (static_init_controller_config[dev_num][index_cnt].reg_addr !=
  361. 0) {
  362. CHECK_STATUS(ddr3_tip_if_write
  363. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  364. static_init_controller_config[dev_num][index_cnt].
  365. reg_addr,
  366. static_init_controller_config[dev_num][index_cnt].
  367. reg_data,
  368. static_init_controller_config[dev_num][index_cnt].
  369. reg_mask));
  370. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  371. ("Init_controller index_cnt %d\n",
  372. index_cnt));
  373. index_cnt++;
  374. }
  375. return MV_OK;
  376. }
  377. int ddr3_tip_static_phy_init_controller(u32 dev_num)
  378. {
  379. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  380. ("Phy Init Controller 2\n"));
  381. CHECK_STATUS(ddr3_tip_bus_write
  382. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  383. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa4,
  384. 0x3dfe));
  385. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  386. ("Phy Init Controller 3\n"));
  387. CHECK_STATUS(ddr3_tip_bus_write
  388. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  389. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa6,
  390. 0xcb2));
  391. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  392. ("Phy Init Controller 4\n"));
  393. CHECK_STATUS(ddr3_tip_bus_write
  394. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  395. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa9,
  396. 0));
  397. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  398. ("Static Receiver Calibration\n"));
  399. CHECK_STATUS(ddr3_tip_bus_write
  400. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  401. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xd0,
  402. 0x1f));
  403. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  404. ("Static V-REF Calibration\n"));
  405. CHECK_STATUS(ddr3_tip_bus_write
  406. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  407. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa8,
  408. 0x434));
  409. return MV_OK;
  410. }
  411. #endif
  412. /*
  413. * Configure phy (called by static init controller) for static flow
  414. */
  415. int ddr3_tip_configure_phy(u32 dev_num)
  416. {
  417. u32 if_id, phy_id;
  418. struct hws_topology_map *tm = ddr3_get_topology_map();
  419. CHECK_STATUS(ddr3_tip_bus_write
  420. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  421. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  422. PAD_ZRI_CALIB_PHY_REG,
  423. ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
  424. CHECK_STATUS(ddr3_tip_bus_write
  425. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  426. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  427. PAD_ZRI_CALIB_PHY_REG,
  428. ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
  429. CHECK_STATUS(ddr3_tip_bus_write
  430. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  431. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  432. PAD_ODT_CALIB_PHY_REG,
  433. ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
  434. CHECK_STATUS(ddr3_tip_bus_write
  435. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  436. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  437. PAD_ODT_CALIB_PHY_REG,
  438. ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
  439. CHECK_STATUS(ddr3_tip_bus_write
  440. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  441. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  442. PAD_PRE_DISABLE_PHY_REG, 0));
  443. CHECK_STATUS(ddr3_tip_bus_write
  444. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  445. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  446. CMOS_CONFIG_PHY_REG, 0));
  447. CHECK_STATUS(ddr3_tip_bus_write
  448. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  449. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  450. CMOS_CONFIG_PHY_REG, 0));
  451. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  452. /* check if the interface is enabled */
  453. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  454. for (phy_id = 0;
  455. phy_id < tm->num_of_bus_per_interface;
  456. phy_id++) {
  457. VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
  458. /* Vref & clamp */
  459. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  460. (dev_num, ACCESS_TYPE_UNICAST,
  461. if_id, phy_id, DDR_PHY_DATA,
  462. PAD_CONFIG_PHY_REG,
  463. ((clamp_tbl[if_id] << 4) | vref),
  464. ((0x7 << 4) | 0x7)));
  465. /* clamp not relevant for control */
  466. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  467. (dev_num, ACCESS_TYPE_UNICAST,
  468. if_id, phy_id, DDR_PHY_CONTROL,
  469. PAD_CONFIG_PHY_REG, 0x4, 0x7));
  470. }
  471. }
  472. CHECK_STATUS(ddr3_tip_bus_write
  473. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  474. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0x90,
  475. 0x6002));
  476. return MV_OK;
  477. }