/* ddr3_training_static.c */
  1. /*
  2. * Copyright (C) Marvell International Ltd. and its affiliates
  3. *
  4. * SPDX-License-Identifier: GPL-2.0
  5. */
  6. #include <common.h>
  7. #include <spl.h>
  8. #include <asm/io.h>
  9. #include <asm/arch/cpu.h>
  10. #include <asm/arch/soc.h>
  11. #include "ddr3_init.h"
/* Design Guidelines parameters */

u32 g_zpri_data = 123;		/* controller data - P drive strength */
u32 g_znri_data = 123;		/* controller data - N drive strength */
u32 g_zpri_ctrl = 74;		/* controller C/A - P drive strength */
u32 g_znri_ctrl = 74;		/* controller C/A - N drive strength */
u32 g_zpodt_data = 45;		/* controller data - P ODT */
u32 g_znodt_data = 45;		/* controller data - N ODT */
/*
 * NOTE(review): the original comments labelled the two _ctrl ODT values
 * below as "controller data"; by analogy with g_zpri_ctrl/g_znri_ctrl
 * they presumably apply to the C/A (control) pads -- confirm.
 */
u32 g_zpodt_ctrl = 45;		/* controller C/A - P ODT */
u32 g_znodt_ctrl = 45;		/* controller C/A - N ODT */

/* ODT configuration word: two chip-selects vs. a single chip-select */
u32 g_odt_config_2cs = 0x120012;
u32 g_odt_config_1cs = 0x10000;
/* termination (RTT_NOM) and output drive (DIC) codes -- presumably DDR3
 * MR1 field encodings; confirm against the consumers of these globals */
u32 g_rtt_nom = 0x44;
u32 g_dic = 0x2;
#ifdef STATIC_ALGO_SUPPORT

#define PARAM_NOT_CARE	0
#define MAX_STATIC_SEQ	48

/* per-device extra silicon delay, added to the read-leveling round trip
 * (same units as the psec trace delays; registered by the board code) */
u32 silicon_delay[HWS_MAX_DEVICE_NUM];
/* per-device board/package trace-length tables, registered via
 * ddr3_tip_init_static_config_db() */
struct hws_tip_static_config_info static_config[HWS_MAX_DEVICE_NUM];
/* per-device register init sequence, terminated by a reg_addr == 0 entry;
 * registered via ddr3_tip_init_specific_reg_config() */
static reg_data *static_init_controller_config[HWS_MAX_DEVICE_NUM];

/* debug delay in write leveling */
int wl_debug_delay = 0;
/* pup register #3 for functional board */
int function_reg_value = 8;
u32 silicon;

/* extra read-ready delay (clocks) indexed by the DQS phase value 0..7 */
u32 read_ready_delay_phase_offset[] = { 4, 4, 4, 4, 6, 6, 6, 6 };

/*
 * Maps a 4-bit chip-select bitmask (the array index) to:
 *   .cs_num - the CS number selected (meaningful only when exactly one
 *             bit is set in the mask),
 *   .num_of_cs - how many chip-selects the mask enables.
 */
static struct cs_element chip_select_map[] = {
	/* CS Value (single only)	Num_CS */
	{0, 0},			/* bitmask 0x0 */
	{0, 1},			/* bitmask 0x1 -> CS0 */
	{1, 1},			/* bitmask 0x2 -> CS1 */
	{0, 2},			/* bitmask 0x3 */
	{2, 1},			/* bitmask 0x4 -> CS2 */
	{0, 2},			/* bitmask 0x5 */
	{0, 2},			/* bitmask 0x6 */
	{0, 3},			/* bitmask 0x7 */
	{3, 1},			/* bitmask 0x8 -> CS3 */
	{0, 2},			/* bitmask 0x9 */
	{0, 2},			/* bitmask 0xa */
	{0, 3},			/* bitmask 0xb */
	{0, 2},			/* bitmask 0xc */
	{0, 3},			/* bitmask 0xd */
	{0, 3},			/* bitmask 0xe */
	{0, 4}			/* bitmask 0xf */
};
  56. /*
  57. * Register static init controller DB
  58. */
  59. int ddr3_tip_init_specific_reg_config(u32 dev_num, reg_data *reg_config_arr)
  60. {
  61. static_init_controller_config[dev_num] = reg_config_arr;
  62. return MV_OK;
  63. }
  64. /*
  65. * Register static info DB
  66. */
  67. int ddr3_tip_init_static_config_db(
  68. u32 dev_num, struct hws_tip_static_config_info *static_config_info)
  69. {
  70. static_config[dev_num].board_trace_arr =
  71. static_config_info->board_trace_arr;
  72. static_config[dev_num].package_trace_arr =
  73. static_config_info->package_trace_arr;
  74. silicon_delay[dev_num] = static_config_info->silicon_delay;
  75. return MV_OK;
  76. }
/*
 * Static round trip flow - Calculates the total round trip delay.
 *
 * Fills @round_trip_delay_arr (indexed by if_id * buses-per-interface +
 * bus) with a per-bus delay in psec built from the board trace table
 * @table_ptr plus the registered package trace table.  For write
 * leveling (@is_wl != 0) the DQS length is subtracted from the CK
 * length (clock-to-DQS difference); for read leveling both are summed
 * (full round trip).  WL additionally adds wl_debug_delay, RL adds the
 * per-device silicon_delay.
 */
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
					 struct trip_delay_element *table_ptr,
					 int is_wl, u32 *round_trip_delay_arr)
{
	u32 bus_index, global_bus;
	u32 if_id;
	u32 bus_per_interface;
	int sign;
	u32 temp;
	u32 board_trace;
	struct trip_delay_element *pkg_delay_ptr;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * In WL we calc the diff between Clock to DQs in RL we sum the round
	 * trip of Clock and DQs
	 */
	sign = (is_wl) ? -1 : 1;
	bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_index = 0; bus_index < bus_per_interface;
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			global_bus = (if_id * bus_per_interface) + bus_index;

			/* calculate total trip delay (package and board) */
			/*
			 * NOTE(review): board_trace is u32, so in the WL
			 * case (sign == -1) a DQS trace longer than the CK
			 * trace would wrap instead of going negative --
			 * presumably the board tables guarantee
			 * dqs_delay <= ck_delay; confirm.
			 */
			board_trace = (table_ptr[global_bus].dqs_delay * sign) +
				table_ptr[global_bus].ck_delay;
			/* Convert the length to delay in psec units
			 * (scale factor 163/1000) */
			temp = (board_trace * 163) / 1000;
			pkg_delay_ptr =
				static_config[dev_num].package_trace_arr;
			/* board psec + package trace (DQS signed by mode,
			 * CK added) + WL debug delay or silicon delay */
			round_trip_delay_arr[global_bus] = temp +
				(int)(pkg_delay_ptr[global_bus].dqs_delay *
				      sign) +
				(int)pkg_delay_ptr[global_bus].ck_delay +
				(int)((is_wl == 1) ? wl_debug_delay :
				      (int)silicon_delay[dev_num]);
			DEBUG_TRAINING_STATIC_IP(
				DEBUG_LEVEL_TRACE,
				("Round Trip Build round_trip_delay_arr[0x%x]: 0x%x temp 0x%x\n",
				 global_bus, round_trip_delay_arr[global_bus],
				 temp));
		}
	}

	return MV_OK;
}
  126. /*
  127. * Write leveling for static flow - calculating the round trip delay of the
  128. * DQS signal.
  129. */
  130. int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id,
  131. enum hws_ddr_freq frequency,
  132. u32 *round_trip_delay_arr)
  133. {
  134. u32 bus_index; /* index to the bus loop */
  135. u32 bus_start_index;
  136. u32 bus_per_interface;
  137. u32 phase = 0;
  138. u32 adll = 0, adll_cen, adll_inv, adll_final;
  139. u32 adll_period = MEGA / freq_val[frequency] / 64;
  140. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  141. ("ddr3_tip_write_leveling_static_config\n"));
  142. DEBUG_TRAINING_STATIC_IP(
  143. DEBUG_LEVEL_TRACE,
  144. ("dev_num 0x%x IF 0x%x freq %d (adll_period 0x%x)\n",
  145. dev_num, if_id, frequency, adll_period));
  146. bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
  147. bus_start_index = if_id * bus_per_interface;
  148. for (bus_index = bus_start_index;
  149. bus_index < (bus_start_index + bus_per_interface); bus_index++) {
  150. VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
  151. phase = round_trip_delay_arr[bus_index] / (32 * adll_period);
  152. adll = (round_trip_delay_arr[bus_index] -
  153. (phase * 32 * adll_period)) / adll_period;
  154. adll = (adll > 31) ? 31 : adll;
  155. adll_cen = 16 + adll;
  156. adll_inv = adll_cen / 32;
  157. adll_final = adll_cen - (adll_inv * 32);
  158. adll_final = (adll_final > 31) ? 31 : adll_final;
  159. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  160. ("\t%d - phase 0x%x adll 0x%x\n",
  161. bus_index, phase, adll));
  162. /*
  163. * Writing to all 4 phy of Interface number,
  164. * bit 0 \96 4 \96 ADLL, bit 6-8 phase
  165. */
  166. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  167. (dev_num, ACCESS_TYPE_UNICAST, if_id,
  168. (bus_index % 4), DDR_PHY_DATA,
  169. PHY_WRITE_DELAY(cs),
  170. ((phase << 6) + (adll & 0x1f)), 0x1df));
  171. CHECK_STATUS(ddr3_tip_bus_write
  172. (dev_num, ACCESS_TYPE_UNICAST, if_id,
  173. ACCESS_TYPE_UNICAST, (bus_index % 4),
  174. DDR_PHY_DATA, WRITE_CENTRALIZATION_PHY_REG,
  175. ((adll_inv & 0x1) << 5) + adll_final));
  176. }
  177. return MV_OK;
  178. }
/*
 * Read leveling for static flow
 *
 * For every DQS pair of interface @if_id, derives the read sample delay
 * (whole clocks, per chip-select), the DQS phase and the fine ADLL tap
 * value from the pre-computed round-trip delays (psec), then programs
 * the per-PHY read delay registers and the interface-level read sample /
 * read ready delay registers.
 */
int ddr3_tip_read_leveling_static_config(u32 dev_num,
					 u32 if_id,
					 enum hws_ddr_freq frequency,
					 u32 *total_round_trip_delay_arr)
{
	u32 cs, data0, data1, data3 = 0;
	u32 bus_index;		/* index to the bus loop */
	u32 bus_start_index;
	u32 phase0, phase1, max_phase;
	u32 adll0, adll1;
	u32 cl_value;
	u32 min_delay;
	/* periods in psec: full clock, half clock, 1/64-clock ADLL tap */
	u32 sdr_period = MEGA / freq_val[frequency];
	u32 ddr_period = MEGA / freq_val[frequency] / 2;
	u32 adll_period = MEGA / freq_val[frequency] / 64;
	enum hws_speed_bin speed_bin_index;
	u32 rd_sample_dly[MAX_CS_NUM] = { 0 };
	u32 rd_ready_del[MAX_CS_NUM] = { 0 };
	u32 bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_read_leveling_static_config\n"));
	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("dev_num 0x%x ifc 0x%x freq %d\n", dev_num,
				  if_id, frequency));
	DEBUG_TRAINING_STATIC_IP(
		DEBUG_LEVEL_TRACE,
		("Sdr_period 0x%x Ddr_period 0x%x adll_period 0x%x\n",
		 sdr_period, ddr_period, adll_period));

	/*
	 * CAS latency: taken from the topology when running at the target
	 * memory frequency, otherwise looked up in the speed-bin table.
	 */
	if (tm->interface_params[first_active_if].memory_freq ==
	    frequency) {
		cl_value = tm->interface_params[first_active_if].cas_l;
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x\n", cl_value));
	} else {
		speed_bin_index = tm->interface_params[if_id].speed_bin_index;
		cl_value = cas_latency_table[speed_bin_index].cl_val[frequency];
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x speed_bin_index %d\n",
					  cl_value, speed_bin_index));
	}

	/* buses are processed as even/odd pairs sharing one sample delay */
	bus_start_index = if_id * bus_per_interface;
	for (bus_index = bus_start_index;
	     bus_index < (bus_start_index + bus_per_interface);
	     bus_index += 2) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);

		/* chip-select this bus pair belongs to */
		cs = chip_select_map[
			tm->interface_params[if_id].as_bus_params[
				(bus_index % 4)].cs_bitmask].cs_num;

		/* read sample delay calculation: use the faster of the
		 * two buses in the pair */
		min_delay = (total_round_trip_delay_arr[bus_index] <
			     total_round_trip_delay_arr[bus_index + 1]) ?
			total_round_trip_delay_arr[bus_index] :
			total_round_trip_delay_arr[bus_index + 1];
		/* round down to an even number of clocks */
		rd_sample_dly[cs] = 2 * (min_delay / (sdr_period * 2));
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\t%d - min_delay 0x%x cs 0x%x rd_sample_dly[cs] 0x%x\n",
			 bus_index, min_delay, cs, rd_sample_dly[cs]));

		/* phase calculation: remaining delay in half-clock units */
		phase0 = (total_round_trip_delay_arr[bus_index] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		phase1 = (total_round_trip_delay_arr[bus_index + 1] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		max_phase = (phase0 > phase1) ? phase0 : phase1;
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tphase0 0x%x phase1 0x%x max_phase 0x%x\n",
			 phase0, phase1, max_phase));

		/* ADLL calculation: residue below one half-clock, in taps,
		 * clamped to the 5-bit field */
		adll0 = (u32)((total_round_trip_delay_arr[bus_index] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase0)) / adll_period);
		adll0 = (adll0 > 31) ? 31 : adll0;
		adll1 = (u32)((total_round_trip_delay_arr[bus_index + 1] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase1)) / adll_period);
		adll1 = (adll1 > 31) ? 31 : adll1;

		/* The Read delay close the Read FIFO */
		rd_ready_del[cs] = rd_sample_dly[cs] +
			read_ready_delay_phase_offset[max_phase];
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tadll0 0x%x adll1 0x%x rd_ready_del[cs] 0x%x\n",
			 adll0, adll1, rd_ready_del[cs]));

		/*
		 * Write to the phy of Interface (bit 0-4 - ADLL,
		 * bit 6-8 phase)
		 */
		data0 = ((phase0 << 6) + (adll0 & 0x1f));
		data1 = ((phase1 << 6) + (adll1 & 0x1f));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      (bus_index % 4), DDR_PHY_DATA, PHY_READ_DELAY(cs),
			      data0, 0x1df));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      ((bus_index + 1) % 4), DDR_PHY_DATA,
			      PHY_READ_DELAY(cs), data1, 0x1df));
	}

	/*
	 * NOTE(review): data3 is always 0 here, so this clears bits 0-4 of
	 * PHY register 0x3 on every active bus -- confirm that is intended.
	 */
	for (bus_index = 0; bus_index < bus_per_interface; bus_index++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      bus_index, DDR_PHY_DATA, 0x3, data3, 0x1f));
	}

	/* sample delays: CS0 (+CL) in bits 0-7, CS1 in bits 8-15 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_SAMPLE_DELAY,
		      (rd_sample_dly[0] + cl_value) + (rd_sample_dly[1] << 8),
		      MASK_ALL_BITS));

	/* Read_ready_del0 bit 0-4 , CS bits 8-12 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_READY_DELAY,
		      rd_ready_del[0] + (rd_ready_del[1] << 8) + cl_value,
		      MASK_ALL_BITS));

	return MV_OK;
}
/*
 * DDR3 Static flow
 *
 * Top-level static training sequence: initialize the controller, build
 * the write-leveling and read-leveling round-trip delay tables from the
 * registered board trace data, then apply the static WL and RL
 * configuration on every active interface at frequency @freq.
 */
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq)
{
	u32 if_id = 0;
	struct trip_delay_element *table_ptr;
	u32 wl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
	u32 rl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
	struct init_cntr_param init_cntr_prm;
	int ret;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_run_static_alg"));

	init_cntr_prm.do_mrs_phy = 1;
	init_cntr_prm.is_ctrl64_bit = 0;
	init_cntr_prm.init_phy = 1;
	ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
	/*
	 * NOTE(review): a controller init failure is only logged and the
	 * flow continues; confirm whether ret should be propagated here.
	 */
	if (ret != MV_OK) {
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_ERROR,
			("hws_ddr3_tip_init_controller failure\n"));
	}

	/* calculate the round trip delay for Write Leveling */
	table_ptr = static_config[dev_num].board_trace_arr;
	CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
		     (dev_num, table_ptr, 1,
		      wl_total_round_trip_delay_arr));
	/* calculate the round trip delay for Read Leveling */
	CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
		     (dev_num, table_ptr, 0,
		      rl_total_round_trip_delay_arr));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/*
		 * Static frequency is defined according to init-frequency
		 * (not target)
		 */
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("Static IF %d freq %d\n",
					  if_id, freq));
		CHECK_STATUS(ddr3_tip_write_leveling_static_config
			     (dev_num, if_id, freq,
			      wl_total_round_trip_delay_arr));
		CHECK_STATUS(ddr3_tip_read_leveling_static_config
			     (dev_num, if_id, freq,
			      rl_total_round_trip_delay_arr));
	}

	return MV_OK;
}
  353. /*
  354. * Init controller for static flow
  355. */
  356. int ddr3_tip_static_init_controller(u32 dev_num)
  357. {
  358. u32 index_cnt = 0;
  359. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  360. ("ddr3_tip_static_init_controller\n"));
  361. while (static_init_controller_config[dev_num][index_cnt].reg_addr !=
  362. 0) {
  363. CHECK_STATUS(ddr3_tip_if_write
  364. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  365. static_init_controller_config[dev_num][index_cnt].
  366. reg_addr,
  367. static_init_controller_config[dev_num][index_cnt].
  368. reg_data,
  369. static_init_controller_config[dev_num][index_cnt].
  370. reg_mask));
  371. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  372. ("Init_controller index_cnt %d\n",
  373. index_cnt));
  374. index_cnt++;
  375. }
  376. return MV_OK;
  377. }
  378. int ddr3_tip_static_phy_init_controller(u32 dev_num)
  379. {
  380. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  381. ("Phy Init Controller 2\n"));
  382. CHECK_STATUS(ddr3_tip_bus_write
  383. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  384. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa4,
  385. 0x3dfe));
  386. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  387. ("Phy Init Controller 3\n"));
  388. CHECK_STATUS(ddr3_tip_bus_write
  389. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  390. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa6,
  391. 0xcb2));
  392. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  393. ("Phy Init Controller 4\n"));
  394. CHECK_STATUS(ddr3_tip_bus_write
  395. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  396. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa9,
  397. 0));
  398. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  399. ("Static Receiver Calibration\n"));
  400. CHECK_STATUS(ddr3_tip_bus_write
  401. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  402. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xd0,
  403. 0x1f));
  404. DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
  405. ("Static V-REF Calibration\n"));
  406. CHECK_STATUS(ddr3_tip_bus_write
  407. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  408. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa8,
  409. 0x434));
  410. return MV_OK;
  411. }
  412. #endif
  413. /*
  414. * Configure phy (called by static init controller) for static flow
  415. */
  416. int ddr3_tip_configure_phy(u32 dev_num)
  417. {
  418. u32 if_id, phy_id;
  419. struct hws_topology_map *tm = ddr3_get_topology_map();
  420. CHECK_STATUS(ddr3_tip_bus_write
  421. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  422. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  423. PAD_ZRI_CALIB_PHY_REG,
  424. ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
  425. CHECK_STATUS(ddr3_tip_bus_write
  426. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  427. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  428. PAD_ZRI_CALIB_PHY_REG,
  429. ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
  430. CHECK_STATUS(ddr3_tip_bus_write
  431. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  432. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  433. PAD_ODT_CALIB_PHY_REG,
  434. ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
  435. CHECK_STATUS(ddr3_tip_bus_write
  436. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  437. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  438. PAD_ODT_CALIB_PHY_REG,
  439. ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
  440. CHECK_STATUS(ddr3_tip_bus_write
  441. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  442. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  443. PAD_PRE_DISABLE_PHY_REG, 0));
  444. CHECK_STATUS(ddr3_tip_bus_write
  445. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  446. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
  447. CMOS_CONFIG_PHY_REG, 0));
  448. CHECK_STATUS(ddr3_tip_bus_write
  449. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  450. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
  451. CMOS_CONFIG_PHY_REG, 0));
  452. for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
  453. /* check if the interface is enabled */
  454. VALIDATE_ACTIVE(tm->if_act_mask, if_id);
  455. for (phy_id = 0;
  456. phy_id < tm->num_of_bus_per_interface;
  457. phy_id++) {
  458. VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
  459. /* Vref & clamp */
  460. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  461. (dev_num, ACCESS_TYPE_UNICAST,
  462. if_id, phy_id, DDR_PHY_DATA,
  463. PAD_CONFIG_PHY_REG,
  464. ((clamp_tbl[if_id] << 4) | vref),
  465. ((0x7 << 4) | 0x7)));
  466. /* clamp not relevant for control */
  467. CHECK_STATUS(ddr3_tip_bus_read_modify_write
  468. (dev_num, ACCESS_TYPE_UNICAST,
  469. if_id, phy_id, DDR_PHY_CONTROL,
  470. PAD_CONFIG_PHY_REG, 0x4, 0x7));
  471. }
  472. }
  473. CHECK_STATUS(ddr3_tip_bus_write
  474. (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
  475. ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0x90,
  476. 0x6002));
  477. return MV_OK;
  478. }