apbh_dma.c
/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
  24. #include <linux/list.h>
  25. #include <common.h>
  26. #include <malloc.h>
  27. #include <asm/errno.h>
  28. #include <asm/io.h>
  29. #include <asm/arch/clock.h>
  30. #include <asm/arch/imx-regs.h>
  31. #include <asm/arch/sys_proto.h>
  32. #include <asm/arch/dma.h>
/* Per-channel software state for every APBH DMA channel on the chip. */
static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
  34. /*
  35. * Test is the DMA channel is valid channel
  36. */
  37. int mxs_dma_validate_chan(int channel)
  38. {
  39. struct mxs_dma_chan *pchan;
  40. if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
  41. return -EINVAL;
  42. pchan = mxs_dma_channels + channel;
  43. if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
  44. return -EINVAL;
  45. return 0;
  46. }
  47. /*
  48. * Return the address of the command within a descriptor.
  49. */
  50. static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
  51. {
  52. return desc->address + offsetof(struct mxs_dma_desc, cmd);
  53. }
/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_append() for details on how DMA command blocks must be configured
 * to maintain the expected behavior of the semaphore's value.
 *
 * Returns the semaphore count (>= 0), or a negative error code if the
 * channel number is invalid or unallocated.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	/* Isolate the PHORE bit-field of the channel's semaphore register. */
	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Flush a DMA descriptor from the data cache so the APBH engine, which
 * fetches commands directly from memory, sees the CPU's latest writes.
 */
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	/* Descriptors are allocated rounded up to MXS_DMA_ALIGNMENT. */
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
/* With the D-cache disabled there is nothing to flush. */
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Nothing queued: just mark the channel busy and return. */
	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/* Channel already running: only chained commands may be added. */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Only one command left in flight: repoint the
			 * hardware's next-command register at the following
			 * descriptor before bumping the semaphore.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		/* Add the newly queued commands to the hardware semaphore. */
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/* Idle channel: point it at the first command and start it. */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		/* Ungate the channel's clock. */
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}
/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Refuse to disable a channel that was never started. */
	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	/* Gate the channel's clock, stopping the hardware. */
	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	/* Move ALL active descriptors to the done list, processed or not. */
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}
  185. /*
  186. * Resets the DMA channel hardware.
  187. */
  188. static int mxs_dma_reset(int channel)
  189. {
  190. struct mxs_apbh_regs *apbh_regs =
  191. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  192. int ret;
  193. ret = mxs_dma_validate_chan(channel);
  194. if (ret)
  195. return ret;
  196. writel(1 << (channel + APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET),
  197. &apbh_regs->hw_apbh_channel_ctrl_set);
  198. return 0;
  199. }
  200. /*
  201. * Enable or disable DMA interrupt.
  202. *
  203. * This function enables the given DMA channel to interrupt the CPU.
  204. */
  205. static int mxs_dma_enable_irq(int channel, int enable)
  206. {
  207. struct mxs_apbh_regs *apbh_regs =
  208. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  209. int ret;
  210. ret = mxs_dma_validate_chan(channel);
  211. if (ret)
  212. return ret;
  213. if (enable)
  214. writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
  215. &apbh_regs->hw_apbh_ctrl1_set);
  216. else
  217. writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
  218. &apbh_regs->hw_apbh_ctrl1_clr);
  219. return 0;
  220. }
  221. /*
  222. * Clear DMA interrupt.
  223. *
  224. * The software that is using the DMA channel must register to receive its
  225. * interrupts and, when they arrive, must call this function to clear them.
  226. */
  227. static int mxs_dma_ack_irq(int channel)
  228. {
  229. struct mxs_apbh_regs *apbh_regs =
  230. (struct mxs_apbh_regs *)MXS_APBH_BASE;
  231. int ret;
  232. ret = mxs_dma_validate_chan(channel);
  233. if (ret)
  234. return ret;
  235. writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
  236. writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);
  237. return 0;
  238. }
/*
 * Request to reserve a DMA channel.
 *
 * Returns 0 on success, -EINVAL for an out-of-range channel, -ENODEV if
 * the channel was never marked valid, or -EBUSY if it is already claimed.
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;

	/* The channel must have been marked VALID (see mxs_dma_init_channel). */
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	/* Already claimed by another user. */
	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}
  259. /*
  260. * Release a DMA channel.
  261. *
  262. * This function releases a DMA channel from its current owner.
  263. *
  264. * The channel will NOT be released if it's marked "busy" (see
  265. * mxs_dma_enable()).
  266. */
  267. int mxs_dma_release(int channel)
  268. {
  269. struct mxs_dma_chan *pchan;
  270. int ret;
  271. ret = mxs_dma_validate_chan(channel);
  272. if (ret)
  273. return ret;
  274. pchan = mxs_dma_channels + channel;
  275. if (pchan->flags & MXS_DMA_FLAGS_BUSY)
  276. return -EBUSY;
  277. pchan->dev = 0;
  278. pchan->active_num = 0;
  279. pchan->pending_num = 0;
  280. pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;
  281. return 0;
  282. }
  283. /*
  284. * Allocate DMA descriptor
  285. */
  286. struct mxs_dma_desc *mxs_dma_desc_alloc(void)
  287. {
  288. struct mxs_dma_desc *pdesc;
  289. uint32_t size;
  290. size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
  291. pdesc = memalign(MXS_DMA_ALIGNMENT, size);
  292. if (pdesc == NULL)
  293. return NULL;
  294. memset(pdesc, 0, sizeof(*pdesc));
  295. pdesc->address = (dma_addr_t)pdesc;
  296. return pdesc;
  297. };
  298. /*
  299. * Free DMA descriptor
  300. */
  301. void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
  302. {
  303. if (pdesc == NULL)
  304. return;
  305. free(pdesc);
  306. }
/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 * The DMA channel's hardware semaphore must reflect the number of DMA
 * commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 * A DMA channel begins processing commands when its hardware semaphore is
 * written with a value greater than zero, and it stops processing commands
 * when the semaphore returns to zero.
 *
 * When a channel finishes a DMA command, it will decrement its semaphore if
 * the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 * In principle, it's not necessary for the DECREMENT_SEMAPHORE to be set,
 * unless it suits the purposes of the software. For example, one could
 * construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 * bit set only in the last one. Then, setting the DMA channel's hardware
 * semaphore to one would cause the entire series of five commands to be
 * processed. However, this example would violate the invariant given above.
 *
 * Rule:
 * ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the DMA
 * channel's hardware semaphore will be decremented EVERY time a command is
 * processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Start as a standalone command: FIRST and LAST, next points at self. */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		/* Chain the previous tail descriptor's command to this one. */
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		/* The hardware fetches commands from memory: flush the edit. */
		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	/* Only the first descriptor of a chain counts as a pending command. */
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	/* Semaphore equal to active_num means nothing has completed yet. */
	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		/* Stop once only the still-in-flight commands remain. */
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		/* Only a chain's LAST descriptor counts against active_num. */
		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	/* All commands consumed: the channel is idle again. */
	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}
/*
 * Wait for a DMA channel to complete, with a bounded timeout.
 *
 * On timeout the channel is reset and -ETIMEDOUT is returned.
 *
 * NOTE(review): the parameter order is (timeout, chan) — the reverse of
 * every other function in this file, which takes the channel first; easy
 * to misuse, verify at call sites.
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	/* Poll for the channel's completion bit in CTRL1; reset on timeout. */
	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}
/*
 * Execute the DMA channel synchronously: start it, wait for completion,
 * reap the finished descriptors, and shut the channel down again.
 *
 * Returns 0 on success or -ETIMEDOUT if the transfer did not complete
 * within the fixed timeout.
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
/*
 * Initialize the DMA hardware: soft-reset the APBH block and configure
 * the AHB/APB burst modes according to the board configuration.
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

	/* Enable or disable 8-beat AHB bursts per board config. */
#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

	/* Enable or disable APB bursts per board config. */
#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif
}
  478. int mxs_dma_init_channel(int channel)
  479. {
  480. struct mxs_dma_chan *pchan;
  481. int ret;
  482. pchan = mxs_dma_channels + channel;
  483. pchan->flags = MXS_DMA_FLAGS_VALID;
  484. ret = mxs_dma_request(channel);
  485. if (ret) {
  486. printf("MXS DMA: Can't acquire DMA channel %i\n",
  487. channel);
  488. return ret;
  489. }
  490. mxs_dma_reset(channel);
  491. mxs_dma_ack_irq(channel);
  492. return 0;
  493. }