xor.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"
#include "xor_regs.h"

/* defines */
#ifdef MV_DEBUG
#define DB(x) x
#else
#define DB(x)
#endif
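
/*
 * Backup of the XOR address window configuration, saved by mv_sys_xor_init()
 * and restored by mv_sys_xor_finish() once scrubbing is done.
 */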
static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];
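
/*
 * mv_sys_xor_init - Open XOR address windows for the DRAM chip selects
 *
 * DESCRIPTION:
 *	Saves the current XOR engine window configuration, then opens one
 *	address window per enabled chip select (plus the SRAM window) and
 *	initializes XOR channel 0 so it can be used for scrubbing.
 *
 * INPUT:
 *	num_of_cs  - number of chip selects to map
 *	cs_ena     - bitmask of enabled chip selects
 *	cs_size    - size of each chip select, in bytes
 *	base_delta - offset added to the base address of each window
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */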
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
	u32 reg, ui, cs_count;
	uint64_t base, size_mask;

	ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_base_backup[ui] =
			reg_read(XOR_BASE_ADDR_REG(0, ui));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_mask_backup[ui] =
			reg_read(XOR_SIZE_MASK_REG(0, ui));

	reg = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/* Enable Window x for each CS */
			reg |= (0x1 << (ui));
			/* Allow full (read and write) access for Window x */
			reg |= (0x3 << ((ui * 2) + 16));
		}
	}
	reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);

	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/*
			 * window x - Base - 0x00000000,
			 * Attribute 0x0e - DRAM
			 */
			base = cs_size * ui + base_delta;
			/* fixed size 2GB for each CS */
			size_mask = 0x7FFF0000;
			switch (ui) {
			case 0:
				base |= 0xe00;
				break;
			case 1:
				base |= 0xd00;
				break;
			case 2:
				base |= 0xb00;
				break;
			case 3:
				base |= 0x700;
				break;
			case 4: /* SRAM */
				base = 0x40000000;
				/* configure as shared transaction */
				base |= 0x1F00;
				size_mask = 0xF0000;
				break;
			}

			reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);

			size_mask = (cs_size / _64K) - 1;
			size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) &
				    XESMRX_SIZE_MASK_MASK;
			/* window x - Size */
			reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
		}
	}

	mv_xor_hal_init(1);
}
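
/*
 * mv_sys_xor_finish - Restore the XOR address window configuration
 *
 * DESCRIPTION:
 *	Restores the XOR engine window registers saved by mv_sys_xor_init()
 *	and clears the address override register.
 *
 * INPUT:
 *	None.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */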
void mv_sys_xor_finish(void)
{
	u32 ui;

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_BASE_ADDR_REG(0, ui),
			  ui_xor_regs_base_backup[ui]);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_SIZE_MASK_REG(0, ui),
			  ui_xor_regs_mask_backup[ui]);

	reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
}

/*
 * mv_xor_hal_init - Initialize XOR engine
 *
 * DESCRIPTION:
 *	This function initializes the XOR unit: it aborts any ongoing
 *	activity and sets a default configuration for each channel.
 *
 * INPUT:
 *	xor_chan_num - number of XOR channels to initialize.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */
void mv_xor_hal_init(u32 xor_chan_num)
{
	u32 i;

	/* Abort any XOR activity & set default configuration */
	for (i = 0; i < xor_chan_num; i++) {
		mv_xor_command_set(i, MV_STOP);
		mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
				   (4 << XEXCR_DST_BURST_LIMIT_OFFS) |
				   (4 << XEXCR_SRC_BURST_LIMIT_OFFS));
	}
}

/*
 * mv_xor_ctrl_set - Set XOR channel control register
 *
 * DESCRIPTION:
 *	Writes the given control value to the configuration register of the
 *	given XOR channel, preserving the channel's current operation mode.
 *
 * INPUT:
 *	chan     - XOR channel number.
 *	xor_ctrl - new control value.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_OK.
 *
 * NOTE:
 *	This function does not modify the Operation_mode field of the
 *	control register.
 */
int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
{
	u32 old_value;

	/* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
	old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
		XEXCR_OPERATION_MODE_MASK;
	xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
	xor_ctrl |= old_value;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);

	return MV_OK;
}
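
/*
 * mv_xor_mem_init - Fill a memory block with a 64-bit pattern
 *
 * DESCRIPTION:
 *	Programs the given XOR channel for a Memory Init operation and starts
 *	it, so that block_size bytes starting at start_ptr are written with
 *	the pattern {init_val_high, init_val_low}.
 *
 * INPUT:
 *	chan          - XOR channel number.
 *	start_ptr     - physical address of the block to initialize.
 *	block_size    - size of the block, in bytes.
 *	init_val_high - upper 32 bits of the initialization pattern.
 *	init_val_low  - lower 32 bits of the initialization pattern.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_BAD_PARAM on invalid parameters, MV_BUSY if the channel is already
 *	active, MV_OK otherwise.
 */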
int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
		    u32 init_val_high, u32 init_val_low)
{
	u32 temp;

	if (block_size == _4G)
		block_size -= 1;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN)
		return MV_BAD_PARAM;

	if (mv_xor_state_get(chan) == MV_ACTIVE)
		return MV_BUSY;

	if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
	    (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
		return MV_BAD_PARAM;

	/* set the operation mode to Memory Init */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;
	temp |= XEXCR_OPERATION_MODE_MEM_INIT;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);

	/*
	 * update the start_ptr field in XOR Engine [0..1] Destination Pointer
	 * Register
	 */
	reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);

	/*
	 * update the Block_size field in the XOR Engine[0..1] Block Size
	 * Registers
	 */
	reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  block_size);

	/*
	 * update the field Init_val_l in the XOR Engine Initial Value Register
	 * Low (XEIVRL)
	 */
	reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);

	/*
	 * update the field Init_val_h in the XOR Engine Initial Value Register
	 * High (XEIVRH)
	 */
	reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}

/*
 * mv_xor_state_get - Get XOR channel state
 *
 * DESCRIPTION:
 *	The XOR channel activity state can be active, idle or paused.
 *	This function returns the channel activity state.
 *
 * INPUT:
 *	chan - the channel number
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_IDLE            - if the engine is idle.
 *	MV_ACTIVE          - if the engine is busy.
 *	MV_PAUSED          - if the engine is paused.
 *	MV_UNDEFINED_STATE - if the engine state is undefined or there is no
 *	                     such engine.
 */
enum mv_state mv_xor_state_get(u32 chan)
{
	u32 state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_UNDEFINED_STATE;
	}

	/* read the current state */
	state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	state &= XEXACTR_XESTATUS_MASK;

	/* return the state */
	switch (state) {
	case XEXACTR_XESTATUS_IDLE:
		return MV_IDLE;
	case XEXACTR_XESTATUS_ACTIVE:
		return MV_ACTIVE;
	case XEXACTR_XESTATUS_PAUSED:
		return MV_PAUSED;
	}

	return MV_UNDEFINED_STATE;
}

/*
 * mv_xor_command_set - Set command of XOR channel
 *
 * DESCRIPTION:
 *	An XOR channel can be started, stopped, paused and restarted.
 *	Pause can be set only if the channel is active.
 *	Start can be set only if the channel is idle or paused.
 *	Restart can be set only if the channel is paused.
 *	Stop can be set only if the channel is active.
 *
 * INPUT:
 *	chan    - The channel number
 *	command - The command type (start, stop, restart, pause)
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_OK on success, MV_BAD_PARAM on erroneous parameter, MV_ERROR on
 *	undefined XOR engine mode.
 */
int mv_xor_command_set(u32 chan, enum mv_command command)
{
	enum mv_state state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}

	/* get the current state */
	state = mv_xor_state_get(chan);

	if ((command == MV_START) && (state == MV_IDLE)) {
		/* command is start and current state is idle */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
		/* command is stop and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTOP_MASK);
		return MV_OK;
	} else if (((enum mv_state)command == MV_PAUSED) &&
		   (state == MV_ACTIVE)) {
		/* command is pause and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XEPAUSE_MASK);
		return MV_OK;
	} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
		/* command is restart and current state is paused */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XERESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_IDLE)) {
		/* stop requested while the channel is already idle */
		return MV_OK;
	}

	/* illegal command */
	DB(printf("%s: ERR. Illegal command\n", __func__));
	return MV_BAD_PARAM;
}
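
/*
 * ddr3_new_tip_ecc_scrub - Scrub the whole DRAM to initialize ECC
 *
 * DESCRIPTION:
 *	Opens XOR address windows for all enabled chip selects, uses the XOR
 *	engine Memory Init mode to write a fixed pattern over the whole
 *	memory so the ECC check bits become consistent, waits for completion
 *	and then restores the original XOR configuration.
 *
 * INPUT:
 *	None.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	None.
 */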
void ddr3_new_tip_ecc_scrub(void)
{
	u32 cs_c, max_cs;
	u32 cs_ena = 0;
	u32 dev_num = 0;
	uint64_t total_mem_size, cs_mem_size = 0;

	printf("DDR3 Training Sequence - Start scrubbing\n");
	max_cs = ddr3_tip_max_cs_get(dev_num);
	for (cs_c = 0; cs_c < max_cs; cs_c++)
		cs_ena |= 1 << cs_c;

	/* assume that all CS have the same size */
	ddr3_calc_mem_cs_size(0, &cs_mem_size);

	mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
	total_mem_size = max_cs * cs_mem_size;
	mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);

	/* wait for previous transfer completion */
	while (mv_xor_state_get(0) != MV_IDLE)
		;

	/* restore the XOR configuration */
	mv_sys_xor_finish();

	printf("DDR3 Training Sequence - End scrubbing\n");
}

/*
 * mv_xor_transfer - Transfer data from source to destination in one of
 *                   three modes: XOR, CRC32 or DMA
 *
 * DESCRIPTION:
 *	This function initiates an XOR channel, according to the given
 *	parameters, to perform an XOR, CRC32 or DMA transaction.
 *	To gain maximum performance the user is asked to keep the following
 *	restrictions:
 *	1) Selected engine is available (not busy).
 *	2) This module does not take into consideration CPU MMU issues.
 *	   In order for the XOR engine to access the appropriate source
 *	   and destination, address parameters must be given in system
 *	   physical mode.
 *	3) This API does not take care of cache coherency issues. The source,
 *	   destination and, in case of chain, the descriptor list are assumed
 *	   to be cache coherent.
 *	4) Parameters validity.
 *
 * INPUT:
 *	chan          - XOR channel number.
 *	type          - One of three: XOR, CRC32 and DMA operations.
 *	xor_chain_ptr - address of chain pointer
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
 */
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
{
	u32 temp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}

	if (mv_xor_state_get(chan) == MV_ACTIVE) {
		DB(printf("%s: ERR. Channel is already active\n", __func__));
		return MV_BUSY;
	}

	if (xor_chain_ptr == 0x0) {
		DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n",
			  __func__));
		return MV_BAD_PARAM;
	}

	/* read configuration register and mask the operation mode field */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (type) {
	case MV_XOR:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to XOR */
		temp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to DMA */
		temp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to CRC32 */
		temp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* write the operation mode to the register */
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);

	/*
	 * update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
	 * Pointer Register (XExNDPR)
	 */
	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  xor_chain_ptr);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}