// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 */
#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>
#include <asm/mach-imx/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware has been told
 * to process, but has not yet finished. This is a volatile value read
 * directly from hardware, so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
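
/*
 * Illustrative sketch (not part of the original driver): given the
 * semantics above, code in this file could busy-wait until a channel has
 * drained every command it was handed:
 *
 *	while (mxs_dma_read_semaphore(channel) > 0)
 *		;	// reaches zero once all commands have finished
 */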

#ifndef CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an
 * undefined state. It is unwise to call this function if there is ANY chance
 * the hardware is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif (defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7))
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
		       &apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore
 *     if the DECREMENT_SEMAPHORE bit is set in that command's flag bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one
 *     could construct a series of five DMA commands, with the
 *     DECREMENT_SEMAPHORE bit set only in the last one. Then, setting the DMA
 *     channel's hardware semaphore to one would cause the entire series of
 *     five commands to be processed. However, this example would violate the
 *     invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a
 *     command is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
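
/*
 * Illustrative sketch (not part of the original driver): appending two
 * commands that follow the rule above, with DECREMENT_SEMAPHORE
 * (MXS_DMA_DESC_DEC_SEM) set on each. "channel", "buf" and "len" are
 * hypothetical placeholders; mxs_dma_desc_append() itself wires up the
 * chain between the two commands.
 *
 *	struct mxs_dma_desc *d1 = mxs_dma_desc_alloc();
 *	struct mxs_dma_desc *d2 = mxs_dma_desc_alloc();
 *
 *	d1->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_DEC_SEM |
 *		       (len << MXS_DMA_DESC_BYTES_OFFSET);
 *	d1->cmd.address = (dma_addr_t)buf;
 *	mxs_dma_desc_append(channel, d1);	// d1 is both FIRST and LAST
 *
 *	d2->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_DEC_SEM |
 *		       MXS_DMA_DESC_IRQ | (len << MXS_DMA_DESC_BYTES_OFFSET);
 *	d2->cmd.address = (dma_addr_t)(buf + len);
 *	mxs_dma_desc_append(channel, d2);	// d1 now chains to d2
 */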

/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
			      1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
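
/*
 * Illustrative sketch (not part of the original driver): a minimal
 * one-shot transfer from start to finish using the API in this file.
 * "channel", "buf" and "len" are hypothetical placeholders.
 *
 *	struct mxs_dma_desc *d;
 *	int ret;
 *
 *	mxs_dma_init();
 *	mxs_dma_init_channel(channel);
 *
 *	d = mxs_dma_desc_alloc();
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
 *		      MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
 *		      (len << MXS_DMA_DESC_BYTES_OFFSET);
 *	d->cmd.address = (dma_addr_t)buf;
 *	mxs_dma_desc_append(channel, d);
 *
 *	ret = mxs_dma_go(channel);	// runs the chain, waits for the IRQ bit
 *
 *	mxs_dma_desc_free(d);
 *	mxs_dma_release(channel);
 */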

/*
 * Execute a continuously running circular DMA descriptor.
 * NOTE: This is not intended for general use, but rather
 *	 for the LCD driver in Smart-LCD mode. It allows
 *	 continuous triggering of the RUN bit there.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}

/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);
	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
		       channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}