musb_gadget.c
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#else
#include <common.h>
#include <linux/usb/ch9.h>
#include "linux-compat.h"
#endif

#include "musb_core.h"
/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but saw USBCV failures
 *       in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave with network and g_zero tests
 *     + dma is slow in typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */
/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
                (req->map_state != UN_MAPPED))

#ifndef CONFIG_MUSB_PIO_ONLY
/* Maps the buffer to dma */
static inline void map_dma_buffer(struct musb_request *request,
                        struct musb *musb, struct musb_ep *musb_ep)
{
        int compatible = true;
        struct dma_controller *dma = musb->dma_controller;

        request->map_state = UN_MAPPED;

        if (!is_dma_capable() || !musb_ep->dma)
                return;

        /* Check if DMA engine can handle this request.
         * DMA code must reject the USB request explicitly.
         * Default behaviour is to map the request.
         */
        if (dma->is_compatible)
                compatible = dma->is_compatible(musb_ep->dma,
                                musb_ep->packet_sz, request->request.buf,
                                request->request.length);
        if (!compatible)
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                request->request.dma = dma_map_single(
                                musb->controller,
                                request->request.buf,
                                request->request.length,
                                request->tx
                                        ? DMA_TO_DEVICE
                                        : DMA_FROM_DEVICE);
                request->map_state = MUSB_MAPPED;
        } else {
                dma_sync_single_for_device(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->map_state = PRE_MAPPED;
        }
}

/* Unmap the buffer from dma and map it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
{
        if (!is_buffer_mapped(request))
                return;

        if (request->request.dma == DMA_ADDR_INVALID) {
                dev_vdbg(musb->controller,
                                "not unmapping a never mapped buffer\n");
                return;
        }
        if (request->map_state == MUSB_MAPPED) {
                dma_unmap_single(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
                request->request.dma = DMA_ADDR_INVALID;
        } else { /* PRE_MAPPED */
                dma_sync_single_for_cpu(musb->controller,
                        request->request.dma,
                        request->request.length,
                        request->tx
                                ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
        }
        request->map_state = UN_MAPPED;
}
#else
static inline void map_dma_buffer(struct musb_request *request,
                        struct musb *musb, struct musb_ep *musb_ep)
{
}

static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
{
}
#endif
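
/*
 * Note: a request's map_state walks UN_MAPPED -> (MUSB_MAPPED or
 * PRE_MAPPED) -> UN_MAPPED.  A minimal sketch of the intended pairing,
 * assuming a DMA-capable build (illustrative only, not driver code):
 *
 *	map_dma_buffer(req, musb, musb_ep);	// before channel_program()
 *	...					// DMA runs, or programming fails
 *	unmap_dma_buffer(req, musb);		// before CPU touches buf again
 *
 * PRE_MAPPED requests (the gadget supplied a DMA address) are only
 * synced, never unmapped, so ownership of the mapping stays with the
 * gadget driver.
 */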
/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
        struct musb_ep          *ep,
        struct usb_request      *request,
        int                     status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
        struct musb_request     *req;
        struct musb             *musb;
        int                     busy = ep->busy;

        req = to_musb_request(request);
        list_del(&req->list);
        if (req->request.status == -EINPROGRESS)
                req->request.status = status;
        musb = req->musb;

        ep->busy = 1;
        spin_unlock(&musb->lock);
        unmap_dma_buffer(req, musb);
        if (request->status == 0)
                dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
                                ep->end_point.name, request,
                                req->request.actual, req->request.length);
        else
                dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
                                ep->end_point.name, request,
                                req->request.actual, req->request.length,
                                request->status);
        req->request.complete(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
        ep->busy = busy;
}
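
/*
 * Because musb_g_giveback() drops musb->lock around the completion
 * callback, the callback may legally queue the next request.  A typical
 * completion handler in a gadget driver might look like this sketch
 * (my_complete is a hypothetical name, not part of this driver):
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status == 0)
 *			usb_ep_queue(ep, req, GFP_ATOMIC);	// resubmit
 *	}
 *
 * Callers in this file therefore reselect the endpoint INDEX register
 * after giveback returns (see musb_g_tx()/musb_g_rx() below).
 */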
/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
        struct musb             *musb = ep->musb;
        struct musb_request     *req = NULL;
        void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

        ep->busy = 1;

        if (is_dma_capable() && ep->dma) {
                struct dma_controller   *c = ep->musb->dma_controller;
                int value;

                if (ep->is_in) {
                        /*
                         * The programming guide says that we must not clear
                         * the DMAMODE bit before DMAENAB, so we only
                         * clear it in the second write...
                         */
                        musb_writew(epio, MUSB_TXCSR,
                                    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_TXCSR,
                                        0 | MUSB_TXCSR_FLUSHFIFO);
                } else {
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                        musb_writew(epio, MUSB_RXCSR,
                                        0 | MUSB_RXCSR_FLUSHFIFO);
                }

                value = c->channel_abort(ep->dma);
                dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
                                ep->name, value);
                c->channel_release(ep->dma);
                ep->dma = NULL;
        }

        while (!list_empty(&ep->req_list)) {
                req = list_first_entry(&ep->req_list, struct musb_request, list);
                musb_g_giveback(ep, &req->request, status);
        }
}
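
/*
 * nuke() is the bulk-abort path: musb_gadget_disable() below calls it
 * with -ESHUTDOWN, so every queued request is completed through
 * musb_g_giveback() with that status, e.g.:
 *
 *	nuke(musb_ep, -ESHUTDOWN);	// endpoint going down
 *
 * Note the DMA channel is released here, not just aborted, so a later
 * musb_gadget_enable() must allocate a fresh one.
 */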
/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
        if (can_bulk_split(musb, ep->type))
                return ep->hw_ep->max_packet_sz_tx;
        else
                return ep->packet_sz;
}

#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral tx (IN) using Mentor DMA works as follows:
        Only mode 0 is used for transfers <= wPktSize,
        mode 1 is used for larger transfers,

        One of the following happens:
        - Host sends IN token which causes an endpoint interrupt
                -> TxAvail
                        -> if DMA is currently busy, exit.
                        -> if queue is non-empty, txstate().

        - Request is queued by the gadget driver.
                -> if queue was previously empty, txstate()

        txstate()
                -> start
                  /\    -> setup DMA
                  |     (data is transferred to the FIFO, then sent out when
                  |     IN token(s) are recd from Host.
                  |             -> DMA interrupt on completion
                  |                calls TxAvail.
                  |                   -> stop DMA, ~DMAENAB,
                  |                   -> set TxPktRdy for last short pkt or zlp
                  |                   -> Complete Request
                  |                   -> Continue next request (call txstate)
                  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */
#endif
/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
        u8                      epnum = req->epnum;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct usb_request      *request;
        u16                     fifo_count = 0, csr;
        int                     use_dma = 0;

        musb_ep = req->ep;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
                                                musb_ep->end_point.name);
                return;
        }

        /* we shouldn't get here while DMA is active ... but we do ... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                dev_dbg(musb->controller, "dma pending...\n");
                return;
        }

        /* read TXCSR before */
        csr = musb_readw(epio, MUSB_TXCSR);

        request = &req->request;
        fifo_count = min(max_ep_writesize(musb, musb_ep),
                        (int)(request->length - request->actual));

        if (csr & MUSB_TXCSR_TXPKTRDY) {
                dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
                                musb_ep->end_point.name, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_SENDSTALL) {
                dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
                                musb_ep->end_point.name, csr);
                return;
        }

        dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
                        epnum, musb_ep->packet_sz, fifo_count,
                        csr);

#ifndef CONFIG_MUSB_PIO_ONLY
        if (is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                size_t request_size;

                /* setup DMA, then program endpoint CSR */
                request_size = min_t(size_t, request->length - request->actual,
                                        musb_ep->dma->max_len);

                use_dma = (request->dma != DMA_ADDR_INVALID);

                /* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
                {
                        if (request_size < musb_ep->packet_sz)
                                musb_ep->dma->desired_mode = 0;
                        else
                                musb_ep->dma->desired_mode = 1;

                        use_dma = use_dma && c->channel_program(
                                        musb_ep->dma, musb_ep->packet_sz,
                                        musb_ep->dma->desired_mode,
                                        request->dma + request->actual, request_size);
                        if (use_dma) {
                                if (musb_ep->dma->desired_mode == 0) {
                                        /*
                                         * We must not clear the DMAMODE bit
                                         * before the DMAENAB bit -- and the
                                         * latter doesn't always get cleared
                                         * before we get here...
                                         */
                                        csr &= ~(MUSB_TXCSR_AUTOSET
                                                | MUSB_TXCSR_DMAENAB);
                                        musb_writew(epio, MUSB_TXCSR, csr
                                                | MUSB_TXCSR_P_WZC_BITS);
                                        csr &= ~MUSB_TXCSR_DMAMODE;
                                        csr |= (MUSB_TXCSR_DMAENAB |
                                                        MUSB_TXCSR_MODE);
                                        /* against programming guide */
                                } else {
                                        csr |= (MUSB_TXCSR_DMAENAB
                                                        | MUSB_TXCSR_DMAMODE
                                                        | MUSB_TXCSR_MODE);
                                        if (!musb_ep->hb_mult)
                                                csr |= MUSB_TXCSR_AUTOSET;
                                }
                                csr &= ~MUSB_TXCSR_P_UNDERRUN;

                                musb_writew(epio, MUSB_TXCSR, csr);
                        }
                }

#elif defined(CONFIG_USB_TI_CPPI_DMA)
                /* program endpoint CSR first, then setup DMA */
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
                       MUSB_TXCSR_MODE;
                musb_writew(epio, MUSB_TXCSR,
                        (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
                                | csr);

                /* ensure writebuffer is empty */
                csr = musb_readw(epio, MUSB_TXCSR);

                /* NOTE host side sets DMAENAB later than this; both are
                 * OK since the transfer dma glue (between CPPI and Mentor
                 * fifos) just tells CPPI it could start.  Data only moves
                 * to the USB TX fifo when both fifos are ready.
                 */

                /* "mode" is irrelevant here; handle terminating ZLPs like
                 * PIO does, since the hardware RNDIS mode seems unreliable
                 * except for the last-packet-is-already-short case.
                 */
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                0,
                                request->dma + request->actual,
                                request_size);
                if (!use_dma) {
                        c->channel_release(musb_ep->dma);
                        musb_ep->dma = NULL;
                        csr &= ~MUSB_TXCSR_DMAENAB;
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* invariant: request->buf is non-null */
                }
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
                use_dma = use_dma && c->channel_program(
                                musb_ep->dma, musb_ep->packet_sz,
                                request->zero,
                                request->dma + request->actual,
                                request_size);
#endif
        }
#endif

        if (!use_dma) {
                /*
                 * Unmap the dma buffer back to cpu if dma channel
                 * programming fails
                 */
                unmap_dma_buffer(req, musb);

                musb_write_fifo(musb_ep->hw_ep, fifo_count,
                                (u8 *) (request->buf + request->actual));
                request->actual += fifo_count;
                csr |= MUSB_TXCSR_TXPKTRDY;
                csr &= ~MUSB_TXCSR_P_UNDERRUN;
                musb_writew(epio, MUSB_TXCSR, csr);
        }

        /* host may already have the data when this message shows... */
        dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
                        request->actual, request->length,
                        musb_readw(epio, MUSB_TXCSR),
                        fifo_count,
                        musb_readw(epio, MUSB_TXMAXP));
}
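
/*
 * PIO example of the fifo_count bound above: for a 512-byte bulk IN
 * endpoint carrying a 700-byte request, txstate() loads
 * min(512, 700 - actual) bytes per call, so the transfer goes out as
 * one 512-byte packet followed by one 188-byte short packet (numbers
 * illustrative, assuming no bulk splitting).
 */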
/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        u8 __iomem              *mbase = musb->mregs;
        struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;

        musb_ep_select(mbase, epnum);
        req = next_request(musb_ep);
        request = &req->request;

        csr = musb_readw(epio, MUSB_TXCSR);
        dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

        dma = is_dma_capable() ? musb_ep->dma : NULL;

        /*
         * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
         * probably rates reporting as a host error.
         */
        if (csr & MUSB_TXCSR_P_SENTSTALL) {
                csr |= MUSB_TXCSR_P_WZC_BITS;
                csr &= ~MUSB_TXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_TXCSR, csr);
                return;
        }

        if (csr & MUSB_TXCSR_P_UNDERRUN) {
                /* We NAKed, no big deal... little reason to care. */
                csr |= MUSB_TXCSR_P_WZC_BITS;
                csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
                musb_writew(epio, MUSB_TXCSR, csr);
                dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
                                epnum, request);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /*
                 * SHOULD NOT HAPPEN... has with CPPI though, after
                 * changing SENDSTALL (and other cases); harmless?
                 */
                dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
                return;
        }

        if (request) {
                u8      is_dma = 0;

                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
                        is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
                        csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
                                 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
                        musb_writew(epio, MUSB_TXCSR, csr);
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
                        request->actual += musb_ep->dma->actual_len;
                        dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
                                epnum, csr, musb_ep->dma->actual_len, request);
                }

                /*
                 * First, maybe a terminating short packet. Some DMA
                 * engines might handle this by themselves.
                 */
                if ((request->zero && request->length
                        && (request->length % musb_ep->packet_sz == 0)
                        && (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
                        || (is_dma && (!dma->desired_mode ||
                                (request->actual &
                                        (musb_ep->packet_sz - 1))))
#endif
                ) {
                        /*
                         * On DMA completion, FIFO may not be
                         * available yet...
                         */
                        if (csr & MUSB_TXCSR_TXPKTRDY)
                                return;

                        dev_dbg(musb->controller, "sending zero pkt\n");
                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
                                        | MUSB_TXCSR_TXPKTRDY);
                        request->zero = 0;
                }

                if (request->actual == request->length) {
                        musb_g_giveback(musb_ep, request, 0);
                        /*
                         * In the giveback function the MUSB lock is
                         * released and acquired after sometime. During
                         * this time period the INDEX register could get
                         * changed by the gadget_queue function especially
                         * on SMP systems. Reselect the INDEX to be sure
                         * we are reading/modifying the right registers
                         */
                        musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
                                dev_dbg(musb->controller, "%s idle now\n",
                                        musb_ep->end_point.name);
                                return;
                        }
                }

                txstate(musb, req);
        }
}
/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral rx (OUT) using Mentor DMA works as follows:
        - Only mode 0 is used.

        - Request is queued by the gadget class driver.
                -> if queue was previously empty, rxstate()

        - Host sends OUT token which causes an endpoint interrupt
          /\      -> RxReady
          |           -> if request queued, call rxstate
          |             /\      -> setup DMA
          |             |            -> DMA interrupt on completion
          |             |               -> RxReady
          |             |                     -> stop DMA
          |             |                     -> ack the read
          |             |                     -> if data recd = max expected
          |             |                               by the request, or host
          |             |                               sent a short packet,
          |             |                               complete the request,
          |             |                               and start the next one.
          |             |_____________________________________|
          |                                      else just wait for the host
          |                                         to send the next OUT token.
          |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */
#endif
/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
        const u8                epnum = req->epnum;
        struct usb_request      *request = &req->request;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        unsigned                fifo_count = 0;
        u16                     len;
        u16                     csr = musb_readw(epio, MUSB_RXCSR);
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
        u8                      use_mode_1;

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        len = musb_ep->packet_sz;

        /* Check if EP is disabled */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
                                                musb_ep->end_point.name);
                return;
        }

        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
                dev_dbg(musb->controller, "DMA pending...\n");
                return;
        }

        if (csr & MUSB_RXCSR_P_SENDSTALL) {
                dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
                                musb_ep->end_point.name, csr);
                return;
        }

        if (is_cppi_enabled() && is_buffer_mapped(req)) {
                struct dma_controller   *c = musb->dma_controller;
                struct dma_channel      *channel = musb_ep->dma;

                /* NOTE: CPPI won't actually stop advancing the DMA
                 * queue after short packet transfers, so this is almost
                 * always going to run as IRQ-per-packet DMA so that
                 * faults will be handled correctly.
                 */
                if (c->channel_program(channel,
                                musb_ep->packet_sz,
                                !request->short_not_ok,
                                request->dma + request->actual,
                                request->length - request->actual)) {

                        /* make sure that if an rxpkt arrived after the irq,
                         * the cppi engine will be ready to take it as soon
                         * as DMA is enabled
                         */
                        csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                        | MUSB_RXCSR_DMAMODE);
                        csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        return;
                }
        }

        if (csr & MUSB_RXCSR_RXPKTRDY) {
                len = musb_readw(epio, MUSB_RXCOUNT);

                /*
                 * Enable Mode 1 on RX transfers only when short_not_ok flag
                 * is set. Currently short_not_ok flag is set only from
                 * file_storage and f_mass_storage drivers
                 */
                if (request->short_not_ok && len == musb_ep->packet_sz)
                        use_mode_1 = 1;
                else
                        use_mode_1 = 0;

                if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
                        if (is_buffer_mapped(req)) {
                                struct dma_controller   *c;
                                struct dma_channel      *channel;
                                int                     use_dma = 0;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

        /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
         * mode 0 only. So we do not get endpoint interrupts due to DMA
         * completion. We only get interrupts from DMA controller.
         *
         * We could operate in DMA mode 1 if we knew the size of the transfer
         * in advance. For mass storage class, request->length = what the host
         * sends, so that'd work.  But for pretty much everything else,
         * request->length is routinely more than what the host sends. For
         * most these gadgets, end of transfer is signified either by a short
         * packet, or filling the last byte of the buffer.  (Sending extra
         * data in that last packet should trigger an overflow fault.)  But
         * in mode 1, we don't get DMA completion interrupt for short packets.
         *
         * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
         * to get endpoint interrupt on every DMA req, but that didn't seem
         * to work reliably.
         *
         * REVISIT an updated g_file_storage can set req->short_not_ok, which
         * then becomes usable as a runtime "use mode 1" hint...
         */

                                /* Experimental: Mode1 works with mass storage use cases */
                                if (use_mode_1) {
                                        csr |= MUSB_RXCSR_AUTOCLEAR;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);

                                        /*
                                         * this special sequence (enabling and then
                                         * disabling MUSB_RXCSR_DMAMODE) is required
                                         * to get DMAReq to activate
                                         */
                                        musb_writew(epio, MUSB_RXCSR,
                                                csr | MUSB_RXCSR_DMAMODE);
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                } else {
                                        if (!musb_ep->hb_mult &&
                                                musb_ep->hw_ep->rx_double_buffered)
                                                csr |= MUSB_RXCSR_AUTOCLEAR;
                                        csr |= MUSB_RXCSR_DMAENAB;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                }

                                if (request->actual < request->length) {
                                        int transfer_size = 0;
                                        if (use_mode_1) {
                                                transfer_size = min(request->length - request->actual,
                                                                channel->max_len);
                                                musb_ep->dma->desired_mode = 1;
                                        } else {
                                                transfer_size = min(request->length - request->actual,
                                                                (unsigned)len);
                                                musb_ep->dma->desired_mode = 0;
                                        }

                                        use_dma = c->channel_program(
                                                        channel,
                                                        musb_ep->packet_sz,
                                                        channel->desired_mode,
                                                        request->dma
                                                        + request->actual,
                                                        transfer_size);
                                }

                                if (use_dma)
                                        return;
                        }
#elif defined(CONFIG_USB_UX500_DMA)
                        if ((is_buffer_mapped(req)) &&
                                (request->actual < request->length)) {

                                struct dma_controller *c;
                                struct dma_channel *channel;
                                int transfer_size = 0;

                                c = musb->dma_controller;
                                channel = musb_ep->dma;

                                /* In case first packet is short */
                                if (len < musb_ep->packet_sz)
                                        transfer_size = len;
                                else if (request->short_not_ok)
                                        transfer_size = min(request->length -
                                                        request->actual,
                                                        channel->max_len);
                                else
                                        transfer_size = min(request->length -
                                                        request->actual,
                                                        (unsigned)len);

                                csr &= ~MUSB_RXCSR_DMAMODE;
                                csr |= (MUSB_RXCSR_DMAENAB |
                                        MUSB_RXCSR_AUTOCLEAR);

                                musb_writew(epio, MUSB_RXCSR, csr);

                                if (transfer_size <= musb_ep->packet_sz) {
                                        musb_ep->dma->desired_mode = 0;
                                } else {
                                        musb_ep->dma->desired_mode = 1;
                                        /* Mode must be set after DMAENAB */
                                        csr |= MUSB_RXCSR_DMAMODE;
                                        musb_writew(epio, MUSB_RXCSR, csr);
                                }

                                if (c->channel_program(channel,
                                                        musb_ep->packet_sz,
                                                        channel->desired_mode,
                                                        request->dma
                                                        + request->actual,
                                                        transfer_size))
                                        return;
                        }
#endif  /* Mentor's DMA */

                        fifo_count = request->length - request->actual;
                        dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
                                        musb_ep->end_point.name,
                                        len, fifo_count,
                                        musb_ep->packet_sz);

                        fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
                        if (tusb_dma_omap() && is_buffer_mapped(req)) {
                                struct dma_controller *c = musb->dma_controller;
                                struct dma_channel *channel = musb_ep->dma;
                                u32 dma_addr = request->dma + request->actual;
                                int ret;

                                ret = c->channel_program(channel,
                                                musb_ep->packet_sz,
                                                channel->desired_mode,
                                                dma_addr,
                                                fifo_count);
                                if (ret)
                                        return;
                        }
#endif
                        /*
                         * Unmap the dma buffer back to cpu if dma channel
                         * programming fails. This buffer is mapped if the
                         * channel allocation is successful
                         */
                        if (is_buffer_mapped(req)) {
                                unmap_dma_buffer(req, musb);

                                /*
                                 * Clear DMAENAB and AUTOCLEAR for the
                                 * PIO mode transfer
                                 */
                                csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
                                musb_writew(epio, MUSB_RXCSR, csr);
                        }

                        musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
                                        (request->buf + request->actual));
                        request->actual += fifo_count;

                        /* REVISIT if we left anything in the fifo, flush
                         * it and report -EOVERFLOW
                         */

                        /* ack the read! */
                        csr |= MUSB_RXCSR_P_WZC_BITS;
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* reach the end or short packet detected */
        if (request->actual == request->length || len < musb_ep->packet_sz)
                musb_g_giveback(musb_ep, request, 0);
}
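
/*
 * Worked example of the mode decision above: a 512-byte bulk OUT
 * endpoint receiving a large mass-storage transfer with short_not_ok
 * set can use mode 1 and take a single DMA completion for the whole
 * programmed buffer; a network gadget (short_not_ok clear) stays in
 * mode 0 and takes one interrupt per 512-byte packet, which is why the
 * status comment at the top of this file calls RX DMA "slow in typical
 * case".
 */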
/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
        u16                     csr;
        struct musb_request     *req;
        struct usb_request      *request;
        void __iomem            *mbase = musb->mregs;
        struct musb_ep          *musb_ep;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        struct dma_channel      *dma;
        struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];

        if (hw_ep->is_shared_fifo)
                musb_ep = &hw_ep->ep_in;
        else
                musb_ep = &hw_ep->ep_out;

        musb_ep_select(mbase, epnum);

        req = next_request(musb_ep);
        if (!req)
                return;

        request = &req->request;

        csr = musb_readw(epio, MUSB_RXCSR);
        dma = is_dma_capable() ? musb_ep->dma : NULL;

        dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
                        csr, dma ? " (dma)" : "", request);

        if (csr & MUSB_RXCSR_P_SENTSTALL) {
                csr |= MUSB_RXCSR_P_WZC_BITS;
                csr &= ~MUSB_RXCSR_P_SENTSTALL;
                musb_writew(epio, MUSB_RXCSR, csr);
                return;
        }

        if (csr & MUSB_RXCSR_P_OVERRUN) {
                /* csr |= MUSB_RXCSR_P_WZC_BITS; */
                csr &= ~MUSB_RXCSR_P_OVERRUN;
                musb_writew(epio, MUSB_RXCSR, csr);

                dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
                if (request->status == -EINPROGRESS)
                        request->status = -EOVERFLOW;
        }
        if (csr & MUSB_RXCSR_INCOMPRX) {
                /* REVISIT not necessarily an error */
                dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
        }

        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /* "should not happen"; likely RXPKTRDY pending for DMA */
                dev_dbg(musb->controller, "%s busy, csr %04x\n",
                        musb_ep->end_point.name, csr);
                return;
        }

        if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
                csr &= ~(MUSB_RXCSR_AUTOCLEAR
                                | MUSB_RXCSR_DMAENAB
                                | MUSB_RXCSR_DMAMODE);
                musb_writew(epio, MUSB_RXCSR,
                        MUSB_RXCSR_P_WZC_BITS | csr);

                request->actual += musb_ep->dma->actual_len;

                dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
                        epnum, csr,
                        musb_readw(epio, MUSB_RXCSR),
                        musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
                /* Autoclear doesn't clear RxPktRdy for short packets */
                if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
                                || (dma->actual_len
                                        & (musb_ep->packet_sz - 1))) {
                        /* ack the read! */
                        csr &= ~MUSB_RXCSR_RXPKTRDY;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }

                /* incomplete, and not short? wait for next IN packet */
                if ((request->actual < request->length)
                                && (musb_ep->dma->actual_len
                                        == musb_ep->packet_sz)) {
                        /* In double buffer case, continue to unload fifo if
                         * there is Rx packet in FIFO.
                         */
                        csr = musb_readw(epio, MUSB_RXCSR);
                        if ((csr & MUSB_RXCSR_RXPKTRDY) &&
                                hw_ep->rx_double_buffered)
                                goto exit;
                        return;
                }
#endif
                musb_g_giveback(musb_ep, request, 0);

                /*
                 * In the giveback function the MUSB lock is
                 * released and acquired after sometime. During
                 * this time period the INDEX register could get
                 * changed by the gadget_queue function especially
                 * on SMP systems. Reselect the INDEX to be sure
                 * we are reading/modifying the right registers
                 */
                musb_ep_select(mbase, epnum);

                req = next_request(musb_ep);
                if (!req)
                        return;
        }
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
exit:
#endif
        /* Analyze request */
        rxstate(musb, req);
}
/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
                        const struct usb_endpoint_descriptor *desc)
{
        unsigned long           flags;
        struct musb_ep          *musb_ep;
        struct musb_hw_ep       *hw_ep;
        void __iomem            *regs;
        struct musb             *musb;
        void __iomem            *mbase;
        u8                      epnum;
        u16                     csr;
        unsigned                tmp;
        int                     status = -EINVAL;

        if (!ep || !desc)
                return -EINVAL;

        musb_ep = to_musb_ep(ep);
        hw_ep = musb_ep->hw_ep;
        regs = hw_ep->regs;
        musb = musb_ep->musb;
        mbase = musb->mregs;
        epnum = musb_ep->current_epnum;

        spin_lock_irqsave(&musb->lock, flags);

        if (musb_ep->desc) {
                status = -EBUSY;
                goto fail;
        }
        musb_ep->type = usb_endpoint_type(desc);

        /* check direction and (later) maxpacket size against endpoint */
        if (usb_endpoint_num(desc) != epnum)
                goto fail;

        /* REVISIT this rules out high bandwidth periodic transfers */
        tmp = usb_endpoint_maxp(desc);
        if (tmp & ~0x07ff) {
                int ok;

                if (usb_endpoint_dir_in(desc))
                        ok = musb->hb_iso_tx;
                else
                        ok = musb->hb_iso_rx;

                if (!ok) {
                        dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
                        goto fail;
                }
                musb_ep->hb_mult = (tmp >> 11) & 3;
        } else {
                musb_ep->hb_mult = 0;
        }

        musb_ep->packet_sz = tmp & 0x7ff;
        tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

        /* enable the interrupts for the endpoint, set the endpoint
         * packet size (or fail), set the mode, clear the fifo
         */
        musb_ep_select(mbase, epnum);
        if (usb_endpoint_dir_in(desc)) {
                u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 1;
                if (!musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_tx) {
                        dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
                        goto fail;
                }

                int_txe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRTXE, int_txe);

                /* REVISIT if can_bulk_split(), use by updating "tmp";
                 * likewise high bandwidth periodic tx
                 */
                /* Set TXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (musb->double_buffer_not_ok)
                        musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
                else
                        musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
                                        | (musb_ep->hb_mult << 11));

                csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
                if (musb_readw(regs, MUSB_TXCSR)
                                & MUSB_TXCSR_FIFONOTEMPTY)
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_TXCSR_P_ISO;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_TXCSR, csr);
                /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
                musb_writew(regs, MUSB_TXCSR, csr);

        } else {
                u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

                if (hw_ep->is_shared_fifo)
                        musb_ep->is_in = 0;
                if (musb_ep->is_in)
                        goto fail;

                if (tmp > hw_ep->max_packet_sz_rx) {
                        dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
                        goto fail;
                }

                int_rxe |= (1 << epnum);
                musb_writew(mbase, MUSB_INTRRXE, int_rxe);

                /* REVISIT if can_bulk_combine() use by updating "tmp"
                 * likewise high bandwidth periodic rx
                 */
                /* Set RXMAXP with the FIFO size of the endpoint
                 * to disable double buffering mode.
                 */
                if (musb->double_buffer_not_ok)
                        musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
                else
                        musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
                                        | (musb_ep->hb_mult << 11));

                /* force shared fifo to OUT-only mode */
                if (hw_ep->is_shared_fifo) {
                        csr = musb_readw(regs, MUSB_TXCSR);
                        csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
                        musb_writew(regs, MUSB_TXCSR, csr);
                }

                csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
                if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
                        csr |= MUSB_RXCSR_P_ISO;
                else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
                        csr |= MUSB_RXCSR_DISNYET;

                /* set twice in case of double buffering */
                musb_writew(regs, MUSB_RXCSR, csr);
                musb_writew(regs, MUSB_RXCSR, csr);
        }

        /* NOTE: all the I/O code _should_ work fine without DMA, in case
         * for some reason you run out of channels here.
         */
        if (is_dma_capable() && musb->dma_controller) {
                struct dma_controller   *c = musb->dma_controller;

                musb_ep->dma = c->channel_alloc(c, hw_ep,
                                (desc->bEndpointAddress & USB_DIR_IN));
        } else
                musb_ep->dma = NULL;

        musb_ep->desc = desc;
        musb_ep->busy = 0;
        musb_ep->wedged = 0;
        status = 0;

        pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
                        musb_driver_name, musb_ep->end_point.name,
                        ({ char *s; switch (musb_ep->type) {
                        case USB_ENDPOINT_XFER_BULK:    s = "bulk"; break;
                        case USB_ENDPOINT_XFER_INT:     s = "int"; break;
                        default:                        s = "iso"; break;
                        }; s; }),
                        musb_ep->is_in ? "IN" : "OUT",
                        musb_ep->dma ? "dma, " : "",
                        musb_ep->packet_sz);

        schedule_work(&musb->irq_work);

fail:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}
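
/*
 * Gadget drivers reach this enable path through the usb_ep API rather
 * than directly.  A minimal sketch, assuming the descriptor's direction
 * and wMaxPacketSize fit this hardware endpoint (names illustrative):
 *
 *	struct usb_ep *ep;	// picked from gadget->ep_list
 *	int err = usb_ep_enable(ep, desc);	// dispatches here
 *	if (err)
 *		...	// -EINVAL: bad desc/direction, -EBUSY: in use
 *
 * (The two-argument usb_ep_enable() is the older gadget-core form;
 * newer Linux cores take only the ep and read ep->desc instead.)
 */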
/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
        unsigned long   flags;
        struct musb     *musb;
        u8              epnum;
        struct musb_ep  *musb_ep;
        void __iomem    *epio;
        int             status = 0;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;
        epnum = musb_ep->current_epnum;
        epio = musb->endpoints[epnum].regs;

        spin_lock_irqsave(&musb->lock, flags);
        musb_ep_select(musb->mregs, epnum);

        /* zero the endpoint sizes */
        if (musb_ep->is_in) {
                u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
                int_txe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
                musb_writew(epio, MUSB_TXMAXP, 0);
        } else {
                u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
                int_rxe &= ~(1 << epnum);
                musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
                musb_writew(epio, MUSB_RXMAXP, 0);
        }

        musb_ep->desc = NULL;
#ifndef __UBOOT__
        musb_ep->end_point.desc = NULL;
#endif

        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);

        schedule_work(&musb->irq_work);

        spin_unlock_irqrestore(&(musb->lock), flags);

        dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

        return status;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb             *musb = musb_ep->musb;
        struct musb_request     *request = NULL;

        request = kzalloc(sizeof *request, gfp_flags);
        if (!request) {
                dev_dbg(musb->controller, "not enough memory\n");
                return NULL;
        }

        request->request.dma = DMA_ADDR_INVALID;
        request->epnum = musb_ep->current_epnum;
        request->ep = musb_ep;

        return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
        kfree(to_musb_request(req));
}
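
/*
 * These two ops back usb_ep_alloc_request()/usb_ep_free_request().
 * Typical gadget-side pairing, as a sketch (buf/len are the caller's;
 * the GFP flag depends on context):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	if (req) {
 *		req->buf = buf;
 *		req->length = len;
 *		...
 *		usb_ep_free_request(ep, req);
 *	}
 *
 * Note request.dma starts out as DMA_ADDR_INVALID, which is how
 * map_dma_buffer() knows the buffer has never been mapped.
 */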
static LIST_HEAD(buffers);

struct free_record {
        struct list_head        list;
        struct device           *dev;
        unsigned                bytes;
        dma_addr_t              dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
        dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
                req->tx ? "TX/IN" : "RX/OUT",
                &req->request, req->request.length, req->epnum);

        musb_ep_select(musb->mregs, req->epnum);
        if (req->tx)
                txstate(musb, req);
        else
                rxstate(musb, req);
}
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
                        gfp_t gfp_flags)
{
        struct musb_ep          *musb_ep;
        struct musb_request     *request;
        struct musb             *musb;
        int                     status = 0;
        unsigned long           lockflags;

        if (!ep || !req)
                return -EINVAL;
        if (!req->buf)
                return -ENODATA;

        musb_ep = to_musb_ep(ep);
        musb = musb_ep->musb;

        request = to_musb_request(req);
        request->musb = musb;

        if (request->ep != musb_ep)
                return -EINVAL;

        dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

        /* request is mine now... */
        request->request.actual = 0;
        request->request.status = -EINPROGRESS;
        request->epnum = musb_ep->current_epnum;
        request->tx = musb_ep->is_in;

        map_dma_buffer(request, musb, musb_ep);

        spin_lock_irqsave(&musb->lock, lockflags);

        /* don't queue if the ep is down */
        if (!musb_ep->desc) {
                dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
                                req, ep->name, "disabled");
                status = -ESHUTDOWN;
                goto cleanup;
        }

        /* add request to the list */
        list_add_tail(&request->list, &musb_ep->req_list);

        /* if this is the head of the queue, start i/o ... */
        if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
                musb_ep_restart(musb, request);

cleanup:
        spin_unlock_irqrestore(&musb->lock, lockflags);
        return status;
}
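
/*
 * Sketch of the submit path as a gadget driver sees it (my_complete and
 * the buffer are illustrative names, not part of this driver):
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// runs with musb->lock dropped
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 *
 * Only the head of req_list starts I/O immediately; later requests wait
 * until musb_g_tx()/musb_g_rx() complete the one in front of them.
 */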
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        struct musb_request     *req = to_musb_request(request);
        struct musb_request     *r;
        unsigned long           flags;
        int                     status = 0;
        struct musb             *musb = musb_ep->musb;

        if (!ep || !request || to_musb_request(request)->ep != musb_ep)
                return -EINVAL;

        spin_lock_irqsave(&musb->lock, flags);

        list_for_each_entry(r, &musb_ep->req_list, list) {
                if (r == req)
                        break;
        }
        if (r != req) {
                dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
                status = -EINVAL;
                goto done;
        }

        /* if the hardware doesn't have the request, easy ... */
        if (musb_ep->req_list.next != &req->list || musb_ep->busy)
                musb_g_giveback(musb_ep, request, -ECONNRESET);

        /* ... else abort the dma transfer ... */
        else if (is_dma_capable() && musb_ep->dma) {
                struct dma_controller   *c = musb->dma_controller;

                musb_ep_select(musb->mregs, musb_ep->current_epnum);
                if (c->channel_abort)
                        status = c->channel_abort(musb_ep->dma);
                else
                        status = -EBUSY;
                if (status == 0)
                        musb_g_giveback(musb_ep, request, -ECONNRESET);
        } else {
                /* NOTE: by sticking to easily tested hardware/driver states,
                 * we leave counting of in-flight packets imprecise.
                 */
                musb_g_giveback(musb_ep, request, -ECONNRESET);
        }

done:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}
/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
        struct musb_ep          *musb_ep = to_musb_ep(ep);
        u8                      epnum = musb_ep->current_epnum;
        struct musb             *musb = musb_ep->musb;
        void __iomem            *epio = musb->endpoints[epnum].regs;
        void __iomem            *mbase;
        unsigned long           flags;
        u16                     csr;
        struct musb_request     *request;
        int                     status = 0;

        if (!ep)
                return -EINVAL;
        mbase = musb->mregs;

        spin_lock_irqsave(&musb->lock, flags);

        if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
                status = -EINVAL;
                goto done;
        }

        musb_ep_select(mbase, epnum);

        request = next_request(musb_ep);
        if (value) {
                if (request) {
                        dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
                                        ep->name);
                        status = -EAGAIN;
                        goto done;
                }
                /* Cannot portably stall with non-empty FIFO */
                if (musb_ep->is_in) {
                        csr = musb_readw(epio, MUSB_TXCSR);
                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
                                dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
                                status = -EAGAIN;
                                goto done;
                        }
                }
        } else
                musb_ep->wedged = 0;

        /* set/clear the stall and toggle bits */
        dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
        if (musb_ep->is_in) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr |= MUSB_TXCSR_P_WZC_BITS
                        | MUSB_TXCSR_CLRDATATOG;
                if (value)
                        csr |= MUSB_TXCSR_P_SENDSTALL;
                else
                        csr &= ~(MUSB_TXCSR_P_SENDSTALL
                                | MUSB_TXCSR_P_SENTSTALL);
                csr &= ~MUSB_TXCSR_TXPKTRDY;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else {
                csr = musb_readw(epio, MUSB_RXCSR);
                csr |= MUSB_RXCSR_P_WZC_BITS
                        | MUSB_RXCSR_FLUSHFIFO
                        | MUSB_RXCSR_CLRDATATOG;
                if (value)
                        csr |= MUSB_RXCSR_P_SENDSTALL;
                else
                        csr &= ~(MUSB_RXCSR_P_SENDSTALL
                                | MUSB_RXCSR_P_SENTSTALL);
                musb_writew(epio, MUSB_RXCSR, csr);
        }

        /* maybe start the first request in the queue */
        if (!musb_ep->busy && !value && request) {
                dev_dbg(musb->controller, "restarting the request\n");
                musb_ep_restart(musb, request);
        }

done:
        spin_unlock_irqrestore(&musb->lock, flags);
        return status;
}
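
/*
 * E.g. a function driver failing a bulk command might do (sketch):
 *
 *	usb_ep_set_halt(ep);	// -> musb_gadget_set_halt(ep, 1)
 *	...			// host later issues CLEAR_FEATURE(HALT)
 *
 * The clear side also resets the data toggle (CLRDATATOG above), as
 * the USB spec requires after a halt condition is cleared.
 */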
#ifndef __UBOOT__
/*
 * Set the halt feature and ignore subsequent clear-halt requests (wedge)
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
#endif

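/*
 * Report how many bytes are waiting in an OUT endpoint's FIFO. Only
 * meaningful for an enabled OUT endpoint; IN and unconfigured endpoints
 * report -EINVAL. Reads MUSB_RXCOUNT with the controller lock held and
 * the endpoint's register bank selected.
 */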
static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	void __iomem	*epio = musb_ep->hw_ep->regs;
	int		retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb	*musb = musb_ep->musb;
		int		epnum = musb_ep->current_epnum;
		void __iomem	*mbase = musb->mregs;
		unsigned long	flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

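/*
 * Discard whatever is pending in an endpoint FIFO. The endpoint's TX
 * interrupt is masked around the flush so a completion interrupt for the
 * discarded packet cannot race with it; the CSR write is issued twice,
 * which also flushes the second packet of a double-buffered FIFO.
 */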
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but it does not flush packets that are already
			 * loaded, so clear TXPKTRDY before flushing.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
#ifndef __UBOOT__
	.set_wedge	= musb_gadget_set_wedge,
#endif
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

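/*
 * Frame number bookkeeping: MUSB_FRAME holds the 11-bit frame number
 * from the last received SOF packet.
 */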
static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

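/*
 * Remote wakeup / SRP entry point. As a suspended B-peripheral this
 * drives RESUME signaling for ~2ms (within the 1-15ms window the USB 2.0
 * spec allows for device-initiated resume); from B_IDLE it instead starts
 * a new session to trigger SRP.
 */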
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
#ifndef __UBOOT__
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1000));

		status = 0;
		goto done;
	default:
		dev_dbg(musb->controller, "Unhandled wake: %s\n",
			otg_state_string(musb->xceiv->state));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	dev_dbg(musb->controller, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
#else
	return 0;
#endif
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	struct musb	*musb = gadget_to_musb(gadget);

	musb->is_self_powered = !!is_selfpowered;
	return 0;
}

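/*
 * Drive the D+ soft-connect pullup via POWER.SOFTCONN; the host only
 * sees the device once this pullup is enabled. Callers here invoke it
 * with musb->lock held.
 */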
static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct musb	*musb = gadget_to_musb(gadget);

	dev_dbg(musb->controller, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
#ifndef __UBOOT__
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
#else
	return 0;
#endif
}

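/*
 * Gadget API pullup control: toggles soft-connect only when the requested
 * state differs from the cached musb->softconnect, with a runtime-PM
 * reference held across the register access.
 */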
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	pm_runtime_get_sync(musb->controller);

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		musb_pullup(musb, is_on);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_put(musb->controller);

	return 0;
}

#ifndef __UBOOT__
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
#endif

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
#ifndef __UBOOT__
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
#endif
};

/* ----------------------------------------------------------------------- */

/* Registration */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port. It assumes
 * all peripheral ports are external...
 */

#ifndef __UBOOT__
static void musb_gadget_release(struct device *dev)
{
	/* kref_put(WHAT) */
	dev_dbg(dev, "%s\n", __func__);
}
#endif

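/*
 * Set up one software endpoint: name it "ep0", "ep<n>in"/"ep<n>out" (or
 * just "ep<n>" for a shared-FIFO endpoint), fix ep0's maxpacket at 64
 * bytes, and link non-zero endpoints into the gadget's ep_list.
 */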
static void __devinit
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		ep->end_point.maxpacket = 64;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
		else
			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}
}

/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void __devinit musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}

/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int __devinit musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
#ifndef __UBOOT__
	musb->g.max_speed = USB_SPEED_HIGH;
#endif
	musb->g.speed = USB_SPEED_UNKNOWN;

#ifndef __UBOOT__
	/* this "gadget" abstracts/virtualizes the controller */
	dev_set_name(&musb->g.dev, "gadget");
	musb->g.dev.parent = musb->controller;
	musb->g.dev.dma_mask = musb->controller->dma_mask;
	musb->g.dev.release = musb_gadget_release;
#endif
	musb->g.name = musb_driver_name;

#ifndef __UBOOT__
	if (is_otg_enabled(musb))
		musb->g.is_otg = 1;
#endif

	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

#ifndef __UBOOT__
	status = device_register(&musb->g.dev);
	if (status != 0) {
		put_device(&musb->g.dev);
		return status;
	}
	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;
#endif

	return 0;
#ifndef __UBOOT__
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
#endif
}

void musb_gadget_cleanup(struct musb *musb)
{
#ifndef __UBOOT__
	usb_del_gadget_udc(&musb->g);
	if (musb->g.dev.parent)
		device_unregister(&musb->g.dev);
#endif
}

/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
#ifndef __UBOOT__
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#else
int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
#endif
{
	struct musb		*musb = gadget_to_musb(g);
#ifndef __UBOOT__
	struct usb_otg		*otg = musb->xceiv->otg;
#endif
	unsigned long		flags;
	int			retval = -EINVAL;

#ifndef __UBOOT__
	if (driver->max_speed < USB_SPEED_HIGH)
		goto err0;
#endif

	pm_runtime_get_sync(musb->controller);

#ifndef __UBOOT__
	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
#endif

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

#ifndef __UBOOT__
	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->state = OTG_STATE_B_IDLE;

	/*
	 * FIXME this ignores the softconnect flag. Drivers are
	 * allowed to hold the peripheral inactive until, for example,
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */

	if (!is_otg_enabled(musb))
#endif
		musb_start(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

#ifndef __UBOOT__
	if (is_otg_enabled(musb)) {
		struct usb_hcd	*hcd = musb_to_hcd(musb);

		dev_dbg(musb->controller, "OTG startup...\n");

		/* REVISIT: funcall to other code, which also
		 * handles power budgeting ... this way also
		 * ensures HdrcStart is indirectly called.
		 */
		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
		if (retval < 0) {
			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
			goto err2;
		}

		if ((musb->xceiv->last_event == USB_EVENT_ID)
					&& otg->set_vbus)
			otg_set_vbus(otg, 1);

		hcd->self.uses_pio_for_control = 1;
	}
	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_put(musb->controller);
#endif

	return 0;

#ifndef __UBOOT__
err2:
	if (!is_otg_enabled(musb))
		musb_stop(musb);
err0:
	return retval;
#endif
}

#ifndef __UBOOT__
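/*
 * Quiesce the controller: drop the pullup, stop the core, and complete
 * every queued request with -ESHUTDOWN so the (still bound) gadget
 * driver observes a clean disconnect.
 */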
static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
	int			i;
	struct musb_hw_ep	*hw_ep;

	/* don't disconnect if it's not connected */
	if (musb->g.speed == USB_SPEED_UNKNOWN)
		driver = NULL;
	else
		musb->g.speed = USB_SPEED_UNKNOWN;

	/* deactivate the hardware */
	if (musb->softconnect) {
		musb->softconnect = 0;
		musb_pullup(musb, 0);
	}
	musb_stop(musb);

	/* killing any outstanding requests will quiesce the driver;
	 * then report disconnect
	 */
	if (driver) {
		for (i = 0, hw_ep = musb->endpoints;
				i < musb->nr_endpoints;
				i++, hw_ep++) {
			musb_ep_select(musb->mregs, i);
			if (hw_ep->is_shared_fifo /* || !epnum */) {
				nuke(&hw_ep->ep_in, -ESHUTDOWN);
			} else {
				if (hw_ep->max_packet_sz_tx)
					nuke(&hw_ep->ep_in, -ESHUTDOWN);
				if (hw_ep->max_packet_sz_rx)
					nuke(&hw_ep->ep_out, -ESHUTDOWN);
			}
		}
	}
}

/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	if (musb->xceiv->last_event == USB_EVENT_NONE)
		pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->state = OTG_STATE_UNDEFINED;
	stop_activity(musb, driver);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	if (is_otg_enabled(musb)) {
		usb_remove_hcd(musb_to_hcd(musb));
		/* FIXME we need to be able to register another
		 * gadget driver here and have everything work;
		 * that currently misbehaves.
		 */
	}

	if (!is_otg_enabled(musb))
		musb_stop(musb);

	pm_runtime_put(musb->controller);

	return 0;
}
#endif

/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
#ifndef __UBOOT__
	musb->is_suspended = 0;
	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				otg_state_string(musb->xceiv->state));
	}
#endif
}

/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
#ifndef __UBOOT__
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	switch (musb->xceiv->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)\n",
				otg_state_string(musb->xceiv->state));
	}
#endif
}

/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}

/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	dev_dbg(musb->controller, "devctl %02x\n", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

#ifndef __UBOOT__
	switch (musb->xceiv->state) {
	default:
		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
			otg_state_string(musb->xceiv->state));
		musb->xceiv->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}
#endif

	musb->is_active = 0;
}

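/*
 * Bus reset handling: report a disconnect if one wasn't already seen,
 * latch the negotiated speed from POWER.HSMODE, and drop back to the
 * default (unaddressed, unconfigured, wakeup-disabled) device state.
 */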
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

#ifndef __UBOOT__
	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb_readb(mbase, MUSB_FADDR),
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);
#endif

	/* report disconnect, if we didn't already (flushing EP state) */
	if (musb->g.speed != USB_SPEED_UNKNOWN)
		musb_g_disconnect(musb);

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;

#ifndef __UBOOT__
	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (is_otg_enabled(musb)) {
		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	} else
		WARN_ON(1);

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g,
			is_otg_enabled(musb) ? 8 : 100);
#endif
}