apbh_dma.c
/*
 * Freescale i.MX28 APBH DMA driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/list.h>

#include <common.h>
#include <malloc.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/imx-common/dma.h>
#include <asm/imx-common/regs-apbh.h>

static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];

/*
 * Test if the DMA channel is a valid channel
 */
int mxs_dma_validate_chan(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
		return -EINVAL;

	return 0;
}

/*
 * Return the address of the command within a descriptor.
 */
static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
{
	return desc->address + offsetof(struct mxs_dma_desc, cmd);
}

/*
 * Read a DMA channel's hardware semaphore.
 *
 * As used by the MXS platform's DMA software, the DMA channel's hardware
 * semaphore reflects the number of DMA commands the hardware will process, but
 * has not yet finished. This is a volatile value read directly from hardware,
 * so it must be viewed as immediately stale.
 *
 * If the channel is not marked busy, or has finished processing all its
 * commands, this value should be zero.
 *
 * See mxs_dma_desc_append() for details on how DMA command blocks must be
 * configured to maintain the expected behavior of the semaphore's value.
 */
static int mxs_dma_read_semaphore(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	uint32_t tmp;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);

	tmp &= APBH_CHn_SEMA_PHORE_MASK;
	tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;

	return tmp;
}

#ifndef CONFIG_SYS_DCACHE_OFF
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif

/*
 * Enable a DMA channel.
 *
 * If the given channel has any DMA descriptors on its active list, this
 * function causes the DMA hardware to begin processing them.
 *
 * This function marks the DMA channel as "busy," whether or not there are any
 * descriptors to process.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
				&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
			&apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
			&apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
			&apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}

/*
 * Disable a DMA channel.
 *
 * This function shuts down a DMA channel and marks it as "not busy." Any
 * descriptors on the active list are immediately moved to the head of the
 * "done" list, whether or not they have actually been processed by the
 * hardware. The "ready" flags of these descriptors are NOT cleared, so they
 * still appear to be active.
 *
 * This function immediately shuts down a DMA channel's hardware, aborting any
 * I/O that may be in progress, potentially leaving I/O hardware in an undefined
 * state. It is unwise to call this function if there is ANY chance the hardware
 * is still processing a command.
 */
static int mxs_dma_disable(int channel)
{
	struct mxs_dma_chan *pchan;
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
		return -EINVAL;

	writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		&apbh_regs->hw_apbh_ctrl0_set);

	pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	list_splice_init(&pchan->active, &pchan->done);

	return 0;
}

/*
 * Resets the DMA channel hardware.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}

/*
 * Enable or disable DMA interrupt.
 *
 * This function enables or disables the given DMA channel's ability to
 * interrupt the CPU.
 */
static int mxs_dma_enable_irq(int channel, int enable)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	if (enable)
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_set);
	else
		writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
			&apbh_regs->hw_apbh_ctrl1_clr);

	return 0;
}

/*
 * Clear DMA interrupt.
 *
 * The software that is using the DMA channel must register to receive its
 * interrupts and, when they arrive, must call this function to clear them.
 */
static int mxs_dma_ack_irq(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
	writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);

	return 0;
}

/*
 * Request to reserve a DMA channel
 */
static int mxs_dma_request(int channel)
{
	struct mxs_dma_chan *pchan;

	if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
		return -EINVAL;

	pchan = mxs_dma_channels + channel;
	if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
		return -ENODEV;

	if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
		return -EBUSY;

	pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
	pchan->active_num = 0;
	pchan->pending_num = 0;

	INIT_LIST_HEAD(&pchan->active);
	INIT_LIST_HEAD(&pchan->done);

	return 0;
}

/*
 * Release a DMA channel.
 *
 * This function releases a DMA channel from its current owner.
 *
 * The channel will NOT be released if it's marked "busy" (see
 * mxs_dma_enable()).
 */
int mxs_dma_release(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY)
		return -EBUSY;

	pchan->dev = 0;
	pchan->active_num = 0;
	pchan->pending_num = 0;
	pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;

	return 0;
}

/*
 * Allocate DMA descriptor
 */
struct mxs_dma_desc *mxs_dma_desc_alloc(void)
{
	struct mxs_dma_desc *pdesc;
	uint32_t size;

	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
	pdesc = memalign(MXS_DMA_ALIGNMENT, size);

	if (pdesc == NULL)
		return NULL;

	memset(pdesc, 0, sizeof(*pdesc));
	pdesc->address = (dma_addr_t)pdesc;

	return pdesc;
}

/*
 * Free DMA descriptor
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	if (pdesc == NULL)
		return;

	free(pdesc);
}

/*
 * Add a DMA descriptor to a channel.
 *
 * If the descriptor list for this channel is not empty, this function sets the
 * CHAIN bit and the NEXTCMD_ADDR fields in the last descriptor's DMA command so
 * it will chain to the new descriptor's command.
 *
 * Then, this function marks the new descriptor as "ready," adds it to the end
 * of the active descriptor list, and increments the count of pending
 * descriptors.
 *
 * The MXS platform DMA software imposes some rules on DMA commands to maintain
 * important invariants. These rules are NOT checked, but they must be carefully
 * applied by software that uses MXS DMA channels.
 *
 * Invariant:
 *     The DMA channel's hardware semaphore must reflect the number of DMA
 *     commands the hardware will process, but has not yet finished.
 *
 * Explanation:
 *     A DMA channel begins processing commands when its hardware semaphore is
 *     written with a value greater than zero, and it stops processing commands
 *     when the semaphore returns to zero.
 *
 *     When a channel finishes a DMA command, it will decrement its semaphore if
 *     the DECREMENT_SEMAPHORE bit is set in that command's flags bits.
 *
 *     In principle, it's not necessary for the DECREMENT_SEMAPHORE bit to be
 *     set, unless it suits the purposes of the software. For example, one could
 *     construct a series of five DMA commands, with the DECREMENT_SEMAPHORE
 *     bit set only in the last one. Then, setting the DMA channel's hardware
 *     semaphore to one would cause the entire series of five commands to be
 *     processed. However, this example would violate the invariant given above.
 *
 * Rule:
 *     ALL DMA commands MUST have the DECREMENT_SEMAPHORE bit set so that the
 *     DMA channel's hardware semaphore will be decremented EVERY time a command
 *     is processed.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
					node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}

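/*
 * Example (illustrative sketch, not part of the driver): building a command
 * that honors the DECREMENT_SEMAPHORE rule above before handing it to
 * mxs_dma_desc_append(). The names MXS_DMA_DESC_COMMAND_DMA_READ,
 * MXS_DMA_DESC_DEC_SEM, MXS_DMA_DESC_IRQ and MXS_DMA_DESC_BYTES() are assumed
 * to come from <asm/imx-common/dma.h>; "channel", "buffer" and "len" are
 * placeholders.
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ |
 *		      MXS_DMA_DESC_DEC_SEM |	// required by the rule above
 *		      MXS_DMA_DESC_IRQ |
 *		      MXS_DMA_DESC_BYTES(len);
 *	d->cmd.address = (dma_addr_t)buffer;
 *
 *	mxs_dma_desc_append(channel, d);
 */
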
/*
 * Clean up processed DMA descriptors.
 *
 * This function removes processed DMA descriptors from the "active" list. Pass
 * in a non-NULL list head to get the descriptors moved to your list. Pass NULL
 * to get the descriptors moved to the channel's "done" list. Descriptors on
 * the "done" list can be retrieved with mxs_dma_get_finished().
 *
 * This function marks the DMA channel as "not busy" if no unprocessed
 * descriptors remain on the "active" list.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}

/*
 * Wait for DMA channel to complete
 */
static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;

	ret = mxs_dma_validate_chan(chan);
	if (ret)
		return ret;

	if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
				1 << chan, timeout)) {
		ret = -ETIMEDOUT;
		mxs_dma_reset(chan);
	}

	return ret;
}

/*
 * Execute the DMA channel
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for DMA to finish. */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Clear out the descriptors we just ran. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the DMA channel down. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}

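/*
 * Example (illustrative sketch, not part of the driver): a complete one-shot
 * transfer on an already initialized channel. "chan", "buffer" and "len" are
 * placeholders, and MXS_DMA_DESC_COMMAND_DMA_WRITE, MXS_DMA_DESC_DEC_SEM,
 * MXS_DMA_DESC_WAIT4END and MXS_DMA_DESC_BYTES() are assumed to be provided
 * by <asm/imx-common/dma.h>.
 *
 *	struct mxs_dma_desc *d = mxs_dma_desc_alloc();
 *	int ret;
 *
 *	if (!d)
 *		return -ENOMEM;
 *
 *	d->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE |
 *		      MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
 *		      MXS_DMA_DESC_BYTES(len);
 *	d->cmd.address = (dma_addr_t)buffer;
 *
 *	mxs_dma_desc_append(chan, d);
 *	ret = mxs_dma_go(chan);	// enable, wait, clean up, disable
 *	mxs_dma_desc_free(d);
 */
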
/*
 * Initialize the DMA hardware
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
		&apbh_regs->hw_apbh_ctrl0_clr);
#endif
}

/*
 * Initialize a single DMA channel: mark it valid, reserve it, reset the
 * channel hardware and clear any stale interrupt.
 */
int mxs_dma_init_channel(int channel)
{
	struct mxs_dma_chan *pchan;
	int ret;

	pchan = mxs_dma_channels + channel;
	pchan->flags = MXS_DMA_FLAGS_VALID;

	ret = mxs_dma_request(channel);

	if (ret) {
		printf("MXS DMA: Can't acquire DMA channel %i\n",
			channel);
		return ret;
	}

	mxs_dma_reset(channel);
	mxs_dma_ack_irq(channel);

	return 0;
}
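
/*
 * Example (illustrative sketch, not part of the driver): typical bring-up
 * sequence in board or peripheral-driver code. The channel number is a
 * placeholder; real callers would use a channel define such as one of the
 * MXS_DMA_CHANNEL_AHB_APBH_* values from <asm/imx-common/dma.h> (assumed
 * name).
 *
 *	mxs_dma_init();			  // reset APBH block, set burst mode
 *	ret = mxs_dma_init_channel(chan); // reserve and reset one channel
 *	if (ret)
 *		return ret;
 */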