dwc2_udc_otg_xfer_dma.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480
  1. /*
  2. * drivers/usb/gadget/dwc2_udc_otg_xfer_dma.c
  3. * Designware DWC2 on-chip full/high speed USB OTG 2.0 device controllers
  4. *
  5. * Copyright (C) 2009 for Samsung Electronics
  6. *
  7. * BSP Support for Samsung's UDC driver
  8. * available at:
  9. * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git
  10. *
  11. * State machine bugfixes:
  12. * Marek Szyprowski <m.szyprowski@samsung.com>
  13. *
  14. * Ported to u-boot:
  15. * Marek Szyprowski <m.szyprowski@samsung.com>
  16. * Lukasz Majewski <l.majewski@samsumg.com>
  17. *
  18. * SPDX-License-Identifier: GPL-2.0+
  19. */
/* Endpoint number recorded by a ClearFeature(ENDPOINT_HALT) request;
 * process_ep_in_intr() uses it to restart the queued transfer (DMA mode). */
static u8 clear_feature_num;
/* Set to 1 after ClearFeature(HALT); cleared once the queue is kicked. */
int clear_feature_flag;

/* Bulk-Only Mass Storage Reset (class-specific request) */
#define GET_MAX_LUN_REQUEST 0xFE
#define BOT_RESET_REQUEST 0xFF
  25. static inline void dwc2_udc_ep0_zlp(struct dwc2_udc *dev)
  26. {
  27. u32 ep_ctrl;
  28. writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
  29. writel(DIEPT_SIZ_PKT_CNT(1), &reg->in_endp[EP0_CON].dieptsiz);
  30. ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
  31. writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
  32. &reg->in_endp[EP0_CON].diepctl);
  33. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
  34. __func__, readl(&reg->in_endp[EP0_CON].diepctl));
  35. dev->ep0state = WAIT_FOR_IN_COMPLETE;
  36. }
  37. static void dwc2_udc_pre_setup(void)
  38. {
  39. u32 ep_ctrl;
  40. debug_cond(DEBUG_IN_EP,
  41. "%s : Prepare Setup packets.\n", __func__);
  42. writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
  43. &reg->out_endp[EP0_CON].doeptsiz);
  44. writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);
  45. ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
  46. writel(ep_ctrl|DEPCTL_EPENA, &reg->out_endp[EP0_CON].doepctl);
  47. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
  48. __func__, readl(&reg->in_endp[EP0_CON].diepctl));
  49. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
  50. __func__, readl(&reg->out_endp[EP0_CON].doepctl));
  51. }
  52. static inline void dwc2_ep0_complete_out(void)
  53. {
  54. u32 ep_ctrl;
  55. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
  56. __func__, readl(&reg->in_endp[EP0_CON].diepctl));
  57. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
  58. __func__, readl(&reg->out_endp[EP0_CON].doepctl));
  59. debug_cond(DEBUG_IN_EP,
  60. "%s : Prepare Complete Out packet.\n", __func__);
  61. writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
  62. &reg->out_endp[EP0_CON].doeptsiz);
  63. writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);
  64. ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
  65. writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
  66. &reg->out_endp[EP0_CON].doepctl);
  67. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
  68. __func__, readl(&reg->in_endp[EP0_CON].diepctl));
  69. debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
  70. __func__, readl(&reg->out_endp[EP0_CON].doepctl));
  71. }
  72. static int setdma_rx(struct dwc2_ep *ep, struct dwc2_request *req)
  73. {
  74. u32 *buf, ctrl;
  75. u32 length, pktcnt;
  76. u32 ep_num = ep_index(ep);
  77. buf = req->req.buf + req->req.actual;
  78. length = min_t(u32, req->req.length - req->req.actual,
  79. ep_num ? DMA_BUFFER_SIZE : ep->ep.maxpacket);
  80. ep->len = length;
  81. ep->dma_buf = buf;
  82. if (ep_num == EP0_CON || length == 0)
  83. pktcnt = 1;
  84. else
  85. pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;
  86. ctrl = readl(&reg->out_endp[ep_num].doepctl);
  87. writel((unsigned int) ep->dma_buf, &reg->out_endp[ep_num].doepdma);
  88. writel(DOEPT_SIZ_PKT_CNT(pktcnt) | DOEPT_SIZ_XFER_SIZE(length),
  89. &reg->out_endp[ep_num].doeptsiz);
  90. writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->out_endp[ep_num].doepctl);
  91. debug_cond(DEBUG_OUT_EP != 0,
  92. "%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
  93. "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
  94. "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
  95. __func__, ep_num,
  96. readl(&reg->out_endp[ep_num].doepdma),
  97. readl(&reg->out_endp[ep_num].doeptsiz),
  98. readl(&reg->out_endp[ep_num].doepctl),
  99. buf, pktcnt, length);
  100. return 0;
  101. }
/*
 * Arm an IN endpoint for a DMA transmit of the next chunk of @req.
 * Flushes the endpoint's Tx FIFO and the data cache, then enables the
 * endpoint. Returns the number of bytes programmed (EP0 callers use
 * this to detect the last/short packet).
 */
static int setdma_tx(struct dwc2_ep *ep, struct dwc2_request *req)
{
	u32 *buf, ctrl = 0;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	length = req->req.length - req->req.actual;

	/* EP0 sends at most one max-packet per programmed transfer */
	if (ep_num == EP0_CON)
		length = min(length, (u32)ep_maxpacket(ep));

	ep->len = length;
	ep->dma_buf = buf;

	/* Write the payload back to memory so the DMA engine sees it */
	flush_dcache_range((unsigned long) ep->dma_buf,
			   (unsigned long) ep->dma_buf +
			   ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	/* A zero-length transfer still consumes one (empty) packet */
	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	/* Flush the endpoint's Tx FIFO */
	writel(TX_FIFO_NUMBER(ep->fifo_num), &reg->grstctl);
	writel(TX_FIFO_NUMBER(ep->fifo_num) | TX_FIFO_FLUSH, &reg->grstctl);
	/* busy-wait until the core clears the flush bit */
	while (readl(&reg->grstctl) & TX_FIFO_FLUSH)
		;

	writel((unsigned long) ep->dma_buf, &reg->in_endp[ep_num].diepdma);
	writel(DIEPT_SIZ_PKT_CNT(pktcnt) | DIEPT_SIZ_XFER_SIZE(length),
	       &reg->in_endp[ep_num].dieptsiz);

	ctrl = readl(&reg->in_endp[ep_num].diepctl);

	/* Write the FIFO number to be used for this endpoint */
	/* NOTE(review): this assumes DIEPCTL_TX_FIFO_NUM_MASK is the
	 * inverted ("keep everything else") mask -- verify against the
	 * register header definition. */
	ctrl &= DIEPCTL_TX_FIFO_NUM_MASK;
	ctrl |= DIEPCTL_TX_FIFO_NUM(ep->fifo_num);

	/* Clear reserved (Next EP) bits */
	ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT));

	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->in_endp[ep_num].diepctl);

	debug_cond(DEBUG_IN_EP,
		   "%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
		   "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->in_endp[ep_num].diepdma),
		   readl(&reg->in_endp[ep_num].dieptsiz),
		   readl(&reg->in_endp[ep_num].diepctl),
		   buf, pktcnt, length);
	return length;
}
/*
 * RX (OUT) DMA completion handler: account received bytes into the head
 * request, then finish it or re-arm the endpoint for the remainder.
 * For EP0 in DATA_STATE_RECV, a zero-length IN status packet is queued.
 */
static void complete_rx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);

	/* DOEPTSIZ now holds the residue; received = programmed - residue */
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	xfer_size = ep->len - xfer_size;

	/*
	 * NOTE:
	 *
	 * Please be careful with proper buffer allocation for USB request,
	 * which needs to be aligned to CONFIG_SYS_CACHELINE_SIZE, not only
	 * with starting address, but also its size shall be a cache line
	 * multiplication.
	 *
	 * This will prevent corruption of data allocated immediately
	 * before or after the buffer.
	 *
	 * For armv7, the cache_v7.c provides proper code to emit "ERROR"
	 * message to warn users.
	 */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(xfer_size, CONFIG_SYS_CACHELINE_SIZE));

	/* clamp so a babbling host cannot push actual past length */
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	is_short = (xfer_size < ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, " => Send ZLP\n");
			dwc2_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct dwc2_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else
		/* more data expected: re-arm for the rest of the request */
		setdma_rx(ep, req);
}
/*
 * TX (IN) DMA completion handler: advance the EP0 control state machine
 * or complete/continue the head request on a regular IN endpoint.
 */
static void complete_tx(struct dwc2_udc *dev, u8 ep_num)
{
	struct dwc2_ep *ep = &dev->ep[ep_num];
	struct dwc2_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	/* GET_STATUS answer just went out: start the OUT status stage */
	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		dwc2_ep0_complete_out();
		return;
	}

	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_IN_EP,
			   "%s: TX DMA done : NULL REQ on IN EP-%d\n",
			   __func__, ep_num);
		return;
	}

	req = list_entry(ep->queue.next, struct dwc2_request, queue);
	ep_tsr = readl(&reg->in_endp[ep_num].dieptsiz);

	/* on IN completion the full programmed length is assumed sent */
	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	debug_cond(DEBUG_IN_EP,
		   "%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		   "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (ep_num == 0) {
		/* EP0 control-transfer state machine */
		if (dev->ep0state == DATA_STATE_XMIT) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, ep0stat =="
				   "DATA_STATE_XMIT\n",
				   __func__, ep_num);
			/* queue the next chunk of the IN data stage */
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			/* data stage finished: run the OUT status stage */
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			dwc2_ep0_complete_out();
		} else {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, invalid ep state\n",
				   __func__, ep_num);
		}
		return;
	}

	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct dwc2_request, queue);
		debug_cond(DEBUG_IN_EP,
			   "%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}
  273. static inline void dwc2_udc_check_tx_queue(struct dwc2_udc *dev, u8 ep_num)
  274. {
  275. struct dwc2_ep *ep = &dev->ep[ep_num];
  276. struct dwc2_request *req;
  277. debug_cond(DEBUG_IN_EP,
  278. "%s: Check queue, ep_num = %d\n", __func__, ep_num);
  279. if (!list_empty(&ep->queue)) {
  280. req = list_entry(ep->queue.next, struct dwc2_request, queue);
  281. debug_cond(DEBUG_IN_EP,
  282. "%s: Next Tx request(0x%p) start...\n",
  283. __func__, req);
  284. if (ep_is_in(ep))
  285. setdma_tx(ep, req);
  286. else
  287. setdma_rx(ep, req);
  288. } else {
  289. debug_cond(DEBUG_IN_EP,
  290. "%s: NULL REQ on IN EP-%d\n", __func__, ep_num);
  291. return;
  292. }
  293. }
/*
 * Walk the IN-endpoint bits of DAINT, clear each endpoint's DIEPINT and
 * dispatch TRANSFER_DONE to complete_tx(); for EP0, also re-arm the
 * SETUP buffer and kick queues stalled by a ClearFeature(HALT).
 */
static void process_ep_in_intr(struct dwc2_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	debug_cond(DEBUG_IN_EP,
		   "*** %s: EP In interrupt : DAINT = 0x%x\n", __func__, ep_intr);

	/* keep only the IN-endpoint bits */
	ep_intr &= DAINT_MASK;

	while (ep_intr) {
		/* test the current low bit; the word is shifted each pass */
		if (ep_intr & DAINT_IN_EP_INT(1)) {
			ep_intr_status = readl(&reg->in_endp[ep_num].diepint);
			debug_cond(DEBUG_IN_EP,
				   "\tEP%d-IN : DIEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->in_endp[ep_num].diepint);

			if (ep_intr_status & TRANSFER_DONE) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					/* status IN done: wait for SETUP */
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					if (dev->ep0state == WAIT_FOR_SETUP)
						dwc2_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						dwc2_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		ep_num++;
		ep_intr >>= 1;
	}
}
  332. static void process_ep_out_intr(struct dwc2_udc *dev)
  333. {
  334. u32 ep_intr, ep_intr_status;
  335. u8 ep_num = 0;
  336. ep_intr = readl(&reg->daint);
  337. debug_cond(DEBUG_OUT_EP != 0,
  338. "*** %s: EP OUT interrupt : DAINT = 0x%x\n",
  339. __func__, ep_intr);
  340. ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK;
  341. while (ep_intr) {
  342. if (ep_intr & 0x1) {
  343. ep_intr_status = readl(&reg->out_endp[ep_num].doepint);
  344. debug_cond(DEBUG_OUT_EP != 0,
  345. "\tEP%d-OUT : DOEPINT = 0x%x\n",
  346. ep_num, ep_intr_status);
  347. /* Interrupt Clear */
  348. writel(ep_intr_status, &reg->out_endp[ep_num].doepint);
  349. if (ep_num == 0) {
  350. if (ep_intr_status & TRANSFER_DONE) {
  351. if (dev->ep0state !=
  352. WAIT_FOR_OUT_COMPLETE)
  353. complete_rx(dev, ep_num);
  354. else {
  355. dev->ep0state = WAIT_FOR_SETUP;
  356. dwc2_udc_pre_setup();
  357. }
  358. }
  359. if (ep_intr_status &
  360. CTRL_OUT_EP_SETUP_PHASE_DONE) {
  361. debug_cond(DEBUG_OUT_EP != 0,
  362. "SETUP packet arrived\n");
  363. dwc2_handle_ep0(dev);
  364. }
  365. } else {
  366. if (ep_intr_status & TRANSFER_DONE)
  367. complete_rx(dev, ep_num);
  368. }
  369. }
  370. ep_num++;
  371. ep_intr >>= 1;
  372. }
  373. }
/*
 * usb client interrupt handler.
 * Acknowledges and dispatches the global DWC2 interrupt sources
 * (enumeration done, suspend/resume, bus reset, per-endpoint events).
 */
static int dwc2_udc_irq(int irq, void *_dev)
{
	struct dwc2_udc *dev = _dev;
	u32 intr_status;
	u32 usb_status, gintmsk;
	unsigned long flags = 0;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->gintsts);
	gintmsk = readl(&reg->gintmsk);

	debug_cond(DEBUG_ISR,
		   "\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		   "DAINT : 0x%x, DAINTMSK : 0x%x\n",
		   __func__, intr_status, state_names[dev->ep0state], gintmsk,
		   readl(&reg->daint), readl(&reg->daintmsk));

	/* nothing pending: spurious interrupt */
	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & INT_ENUMDONE) {
		debug_cond(DEBUG_ISR, "\tSpeed Detection interrupt\n");

		writel(INT_ENUMDONE, &reg->gintsts);	/* write-1-to-clear */
		/* DSTS bits [2:1] carry the enumerated speed -- TODO
		 * confirm field layout against the DWC2 databook */
		usb_status = (readl(&reg->dsts) & 0x6);

		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
			debug_cond(DEBUG_ISR,
				   "\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);
		} else {
			debug_cond(DEBUG_ISR,
				   "\t\tHigh Speed Detection : 0x%x\n",
				   usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & INT_EARLY_SUSPEND) {
		debug_cond(DEBUG_ISR, "\tEarly suspend interrupt\n");
		writel(INT_EARLY_SUSPEND, &reg->gintsts);
	}

	if (intr_status & INT_SUSPEND) {
		usb_status = readl(&reg->dsts);
		debug_cond(DEBUG_ISR,
			   "\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(INT_SUSPEND, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);

			/* HACK to let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				/* drop the lock: the gadget callback may
				 * re-enter the UDC and take it again */
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
	}

	if (intr_status & INT_RESUME) {
		debug_cond(DEBUG_ISR, "\tResume interrupt\n");
		writel(INT_RESUME, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & INT_RESET) {
		usb_status = readl(&reg->gotgctl);
		debug_cond(DEBUG_ISR,
			   "\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(INT_RESET, &reg->gintsts);

		/* GOTGCTL bits [19:18] == 0x3: session valid, treat as a
		 * genuine bus reset -- TODO confirm against the databook */
		if ((usb_status & 0xc0000) == (0x3 << 18)) {
			if (reset_available) {
				debug_cond(DEBUG_ISR,
					   "\t\tOTG core got reset (%d)!!\n",
					   reset_available);
				/* re-init core state and re-arm EP0 for
				 * the next SETUP packet */
				reconfig_usbd(dev);
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				dwc2_udc_pre_setup();
			} else
				reset_available = 1;
		} else {
			reset_available = 1;
			debug_cond(DEBUG_ISR,
				   "\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & INT_IN_EP)
		process_ep_in_intr(dev);

	if (intr_status & INT_OUT_EP)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
  469. /** Queue one request
  470. * Kickstart transfer if needed
  471. */
  472. static int dwc2_queue(struct usb_ep *_ep, struct usb_request *_req,
  473. gfp_t gfp_flags)
  474. {
  475. struct dwc2_request *req;
  476. struct dwc2_ep *ep;
  477. struct dwc2_udc *dev;
  478. unsigned long flags = 0;
  479. u32 ep_num, gintsts;
  480. req = container_of(_req, struct dwc2_request, req);
  481. if (unlikely(!_req || !_req->complete || !_req->buf
  482. || !list_empty(&req->queue))) {
  483. debug("%s: bad params\n", __func__);
  484. return -EINVAL;
  485. }
  486. ep = container_of(_ep, struct dwc2_ep, ep);
  487. if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
  488. debug("%s: bad ep: %s, %d, %p\n", __func__,
  489. ep->ep.name, !ep->desc, _ep);
  490. return -EINVAL;
  491. }
  492. ep_num = ep_index(ep);
  493. dev = ep->dev;
  494. if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
  495. debug("%s: bogus device state %p\n", __func__, dev->driver);
  496. return -ESHUTDOWN;
  497. }
  498. spin_lock_irqsave(&dev->lock, flags);
  499. _req->status = -EINPROGRESS;
  500. _req->actual = 0;
  501. /* kickstart this i/o queue? */
  502. debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p"
  503. "Q empty = %d, stopped = %d\n",
  504. __func__, _ep->name, ep_is_in(ep) ? "in" : "out",
  505. _req, _req->length, _req->buf,
  506. list_empty(&ep->queue), ep->stopped);
  507. #ifdef DEBUG
  508. {
  509. int i, len = _req->length;
  510. printf("pkt = ");
  511. if (len > 64)
  512. len = 64;
  513. for (i = 0; i < len; i++) {
  514. printf("%02x", ((u8 *)_req->buf)[i]);
  515. if ((i & 7) == 7)
  516. printf(" ");
  517. }
  518. printf("\n");
  519. }
  520. #endif
  521. if (list_empty(&ep->queue) && !ep->stopped) {
  522. if (ep_num == 0) {
  523. /* EP0 */
  524. list_add_tail(&req->queue, &ep->queue);
  525. dwc2_ep0_kick(dev, ep);
  526. req = 0;
  527. } else if (ep_is_in(ep)) {
  528. gintsts = readl(&reg->gintsts);
  529. debug_cond(DEBUG_IN_EP,
  530. "%s: ep_is_in, DWC2_UDC_OTG_GINTSTS=0x%x\n",
  531. __func__, gintsts);
  532. setdma_tx(ep, req);
  533. } else {
  534. gintsts = readl(&reg->gintsts);
  535. debug_cond(DEBUG_OUT_EP != 0,
  536. "%s:ep_is_out, DWC2_UDC_OTG_GINTSTS=0x%x\n",
  537. __func__, gintsts);
  538. setdma_rx(ep, req);
  539. }
  540. }
  541. /* pio or dma irq handler advances the queue. */
  542. if (likely(req != 0))
  543. list_add_tail(&req->queue, &ep->queue);
  544. spin_unlock_irqrestore(&dev->lock, flags);
  545. return 0;
  546. }
  547. /****************************************************************/
  548. /* End Point 0 related functions */
  549. /****************************************************************/
  550. /* return: 0 = still running, 1 = completed, negative = errno */
  551. static int write_fifo_ep0(struct dwc2_ep *ep, struct dwc2_request *req)
  552. {
  553. u32 max;
  554. unsigned count;
  555. int is_last;
  556. max = ep_maxpacket(ep);
  557. debug_cond(DEBUG_EP0 != 0, "%s: max = %d\n", __func__, max);
  558. count = setdma_tx(ep, req);
  559. /* last packet is usually short (or a zlp) */
  560. if (likely(count != max))
  561. is_last = 1;
  562. else {
  563. if (likely(req->req.length != req->req.actual + count)
  564. || req->req.zero)
  565. is_last = 0;
  566. else
  567. is_last = 1;
  568. }
  569. debug_cond(DEBUG_EP0 != 0,
  570. "%s: wrote %s %d bytes%s %d left %p\n", __func__,
  571. ep->ep.name, count,
  572. is_last ? "/L" : "",
  573. req->req.length - req->req.actual - count, req);
  574. /* requests complete when all IN data is in the FIFO */
  575. if (is_last) {
  576. ep->dev->ep0state = WAIT_FOR_SETUP;
  577. return 1;
  578. }
  579. return 0;
  580. }
  581. static int dwc2_fifo_read(struct dwc2_ep *ep, u32 *cp, int max)
  582. {
  583. invalidate_dcache_range((unsigned long)cp, (unsigned long)cp +
  584. ROUND(max, CONFIG_SYS_CACHELINE_SIZE));
  585. debug_cond(DEBUG_EP0 != 0,
  586. "%s: bytes=%d, ep_index=%d 0x%p\n", __func__,
  587. max, ep_index(ep), cp);
  588. return max;
  589. }
  590. /**
  591. * udc_set_address - set the USB address for this device
  592. * @address:
  593. *
  594. * Called from control endpoint function
  595. * after it decodes a set address setup packet.
  596. */
  597. static void udc_set_address(struct dwc2_udc *dev, unsigned char address)
  598. {
  599. u32 ctrl = readl(&reg->dcfg);
  600. writel(DEVICE_ADDRESS(address) | ctrl, &reg->dcfg);
  601. dwc2_udc_ep0_zlp(dev);
  602. debug_cond(DEBUG_EP0 != 0,
  603. "%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n",
  604. __func__, address, readl(&reg->dcfg));
  605. dev->usb_address = address;
  606. }
/*
 * Protocol-stall EP0: set STALL (plus EPDIS when the endpoint is
 * currently enabled) on DIEPCTL0, then re-arm EP0 OUT for the next
 * SETUP packet.
 */
static inline void dwc2_udc_ep0_set_stall(struct dwc2_ep *ep)
{
	struct dwc2_udc *dev;
	u32 ep_ctrl = 0;

	dev = ep->dev;
	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);

	/* set the disable and stall bits */
	if (ep_ctrl & DEPCTL_EPENA)
		ep_ctrl |= DEPCTL_EPDIS;

	ep_ctrl |= DEPCTL_STALL;

	writel(ep_ctrl, &reg->in_endp[EP0_CON].diepctl);

	debug_cond(DEBUG_EP0 != 0,
		   "%s: set ep%d stall, DIEPCTL0 = 0x%p\n",
		   __func__, ep_index(ep), &reg->in_endp[EP0_CON].diepctl);

	/*
	 * The application can only set this bit, and the core clears it,
	 * when a SETUP token is received for this endpoint
	 */
	dev->ep0state = WAIT_FOR_SETUP;
	dwc2_udc_pre_setup();
}
  628. static void dwc2_ep0_read(struct dwc2_udc *dev)
  629. {
  630. struct dwc2_request *req;
  631. struct dwc2_ep *ep = &dev->ep[0];
  632. if (!list_empty(&ep->queue)) {
  633. req = list_entry(ep->queue.next, struct dwc2_request, queue);
  634. } else {
  635. debug("%s: ---> BUG\n", __func__);
  636. BUG();
  637. return;
  638. }
  639. debug_cond(DEBUG_EP0 != 0,
  640. "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
  641. __func__, req, req->req.length, req->req.actual);
  642. if (req->req.length == 0) {
  643. /* zlp for Set_configuration, Set_interface,
  644. * or Bulk-Only mass storge reset */
  645. ep->len = 0;
  646. dwc2_udc_ep0_zlp(dev);
  647. debug_cond(DEBUG_EP0 != 0,
  648. "%s: req.length = 0, bRequest = %d\n",
  649. __func__, usb_ctrl->bRequest);
  650. return;
  651. }
  652. setdma_rx(ep, req);
  653. }
  654. /*
  655. * DATA_STATE_XMIT
  656. */
  657. static int dwc2_ep0_write(struct dwc2_udc *dev)
  658. {
  659. struct dwc2_request *req;
  660. struct dwc2_ep *ep = &dev->ep[0];
  661. int ret, need_zlp = 0;
  662. if (list_empty(&ep->queue))
  663. req = 0;
  664. else
  665. req = list_entry(ep->queue.next, struct dwc2_request, queue);
  666. if (!req) {
  667. debug_cond(DEBUG_EP0 != 0, "%s: NULL REQ\n", __func__);
  668. return 0;
  669. }
  670. debug_cond(DEBUG_EP0 != 0,
  671. "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n",
  672. __func__, req, req->req.length, req->req.actual);
  673. if (req->req.length - req->req.actual == ep0_fifo_size) {
  674. /* Next write will end with the packet size, */
  675. /* so we need Zero-length-packet */
  676. need_zlp = 1;
  677. }
  678. ret = write_fifo_ep0(ep, req);
  679. if ((ret == 1) && !need_zlp) {
  680. /* Last packet */
  681. dev->ep0state = WAIT_FOR_COMPLETE;
  682. debug_cond(DEBUG_EP0 != 0,
  683. "%s: finished, waiting for status\n", __func__);
  684. } else {
  685. dev->ep0state = DATA_STATE_XMIT;
  686. debug_cond(DEBUG_EP0 != 0,
  687. "%s: not finished\n", __func__);
  688. }
  689. return 1;
  690. }
  691. static int dwc2_udc_get_status(struct dwc2_udc *dev,
  692. struct usb_ctrlrequest *crq)
  693. {
  694. u8 ep_num = crq->wIndex & 0x7F;
  695. u16 g_status = 0;
  696. u32 ep_ctrl;
  697. debug_cond(DEBUG_SETUP != 0,
  698. "%s: *** USB_REQ_GET_STATUS\n", __func__);
  699. printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK);
  700. switch (crq->bRequestType & USB_RECIP_MASK) {
  701. case USB_RECIP_INTERFACE:
  702. g_status = 0;
  703. debug_cond(DEBUG_SETUP != 0,
  704. "\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n",
  705. g_status);
  706. break;
  707. case USB_RECIP_DEVICE:
  708. g_status = 0x1; /* Self powered */
  709. debug_cond(DEBUG_SETUP != 0,
  710. "\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n",
  711. g_status);
  712. break;
  713. case USB_RECIP_ENDPOINT:
  714. if (crq->wLength > 2) {
  715. debug_cond(DEBUG_SETUP != 0,
  716. "\tGET_STATUS:Not support EP or wLength\n");
  717. return 1;
  718. }
  719. g_status = dev->ep[ep_num].stopped;
  720. debug_cond(DEBUG_SETUP != 0,
  721. "\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n",
  722. g_status);
  723. break;
  724. default:
  725. return 1;
  726. }
  727. memcpy(usb_ctrl, &g_status, sizeof(g_status));
  728. flush_dcache_range((unsigned long) usb_ctrl,
  729. (unsigned long) usb_ctrl +
  730. ROUND(sizeof(g_status), CONFIG_SYS_CACHELINE_SIZE));
  731. writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
  732. writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2),
  733. &reg->in_endp[EP0_CON].dieptsiz);
  734. ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
  735. writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
  736. &reg->in_endp[EP0_CON].diepctl);
  737. dev->ep0state = WAIT_FOR_NULL_COMPLETE;
  738. return 0;
  739. }
  740. static void dwc2_udc_set_nak(struct dwc2_ep *ep)
  741. {
  742. u8 ep_num;
  743. u32 ep_ctrl = 0;
  744. ep_num = ep_index(ep);
  745. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  746. if (ep_is_in(ep)) {
  747. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  748. ep_ctrl |= DEPCTL_SNAK;
  749. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  750. debug("%s: set NAK, DIEPCTL%d = 0x%x\n",
  751. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  752. } else {
  753. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  754. ep_ctrl |= DEPCTL_SNAK;
  755. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  756. debug("%s: set NAK, DOEPCTL%d = 0x%x\n",
  757. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  758. }
  759. return;
  760. }
  761. static void dwc2_udc_ep_set_stall(struct dwc2_ep *ep)
  762. {
  763. u8 ep_num;
  764. u32 ep_ctrl = 0;
  765. ep_num = ep_index(ep);
  766. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  767. if (ep_is_in(ep)) {
  768. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  769. /* set the disable and stall bits */
  770. if (ep_ctrl & DEPCTL_EPENA)
  771. ep_ctrl |= DEPCTL_EPDIS;
  772. ep_ctrl |= DEPCTL_STALL;
  773. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  774. debug("%s: set stall, DIEPCTL%d = 0x%x\n",
  775. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  776. } else {
  777. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  778. /* set the stall bit */
  779. ep_ctrl |= DEPCTL_STALL;
  780. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  781. debug("%s: set stall, DOEPCTL%d = 0x%x\n",
  782. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  783. }
  784. return;
  785. }
  786. static void dwc2_udc_ep_clear_stall(struct dwc2_ep *ep)
  787. {
  788. u8 ep_num;
  789. u32 ep_ctrl = 0;
  790. ep_num = ep_index(ep);
  791. debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type);
  792. if (ep_is_in(ep)) {
  793. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  794. /* clear stall bit */
  795. ep_ctrl &= ~DEPCTL_STALL;
  796. /*
  797. * USB Spec 9.4.5: For endpoints using data toggle, regardless
  798. * of whether an endpoint has the Halt feature set, a
  799. * ClearFeature(ENDPOINT_HALT) request always results in the
  800. * data toggle being reinitialized to DATA0.
  801. */
  802. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  803. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  804. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  805. }
  806. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  807. debug("%s: cleared stall, DIEPCTL%d = 0x%x\n",
  808. __func__, ep_num, readl(&reg->in_endp[ep_num].diepctl));
  809. } else {
  810. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  811. /* clear stall bit */
  812. ep_ctrl &= ~DEPCTL_STALL;
  813. if (ep->bmAttributes == USB_ENDPOINT_XFER_INT
  814. || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) {
  815. ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */
  816. }
  817. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  818. debug("%s: cleared stall, DOEPCTL%d = 0x%x\n",
  819. __func__, ep_num, readl(&reg->out_endp[ep_num].doepctl));
  820. }
  821. return;
  822. }
  823. static int dwc2_udc_set_halt(struct usb_ep *_ep, int value)
  824. {
  825. struct dwc2_ep *ep;
  826. struct dwc2_udc *dev;
  827. unsigned long flags = 0;
  828. u8 ep_num;
  829. ep = container_of(_ep, struct dwc2_ep, ep);
  830. ep_num = ep_index(ep);
  831. if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON ||
  832. ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) {
  833. debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name);
  834. return -EINVAL;
  835. }
  836. /* Attempt to halt IN ep will fail if any transfer requests
  837. * are still queue */
  838. if (value && ep_is_in(ep) && !list_empty(&ep->queue)) {
  839. debug("%s: %s queue not empty, req = %p\n",
  840. __func__, ep->ep.name,
  841. list_entry(ep->queue.next, struct dwc2_request, queue));
  842. return -EAGAIN;
  843. }
  844. dev = ep->dev;
  845. debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value);
  846. spin_lock_irqsave(&dev->lock, flags);
  847. if (value == 0) {
  848. ep->stopped = 0;
  849. dwc2_udc_ep_clear_stall(ep);
  850. } else {
  851. if (ep_num == 0)
  852. dev->ep0state = WAIT_FOR_SETUP;
  853. ep->stopped = 1;
  854. dwc2_udc_ep_set_stall(ep);
  855. }
  856. spin_unlock_irqrestore(&dev->lock, flags);
  857. return 0;
  858. }
  859. static void dwc2_udc_ep_activate(struct dwc2_ep *ep)
  860. {
  861. u8 ep_num;
  862. u32 ep_ctrl = 0, daintmsk = 0;
  863. ep_num = ep_index(ep);
  864. /* Read DEPCTLn register */
  865. if (ep_is_in(ep)) {
  866. ep_ctrl = readl(&reg->in_endp[ep_num].diepctl);
  867. daintmsk = 1 << ep_num;
  868. } else {
  869. ep_ctrl = readl(&reg->out_endp[ep_num].doepctl);
  870. daintmsk = (1 << ep_num) << DAINT_OUT_BIT;
  871. }
  872. debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n",
  873. __func__, ep_num, ep_ctrl, ep_is_in(ep));
  874. /* If the EP is already active don't change the EP Control
  875. * register. */
  876. if (!(ep_ctrl & DEPCTL_USBACTEP)) {
  877. ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) |
  878. (ep->bmAttributes << DEPCTL_TYPE_BIT);
  879. ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) |
  880. (ep->ep.maxpacket << DEPCTL_MPS_BIT);
  881. ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK);
  882. if (ep_is_in(ep)) {
  883. writel(ep_ctrl, &reg->in_endp[ep_num].diepctl);
  884. debug("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n",
  885. __func__, ep_num, ep_num,
  886. readl(&reg->in_endp[ep_num].diepctl));
  887. } else {
  888. writel(ep_ctrl, &reg->out_endp[ep_num].doepctl);
  889. debug("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n",
  890. __func__, ep_num, ep_num,
  891. readl(&reg->out_endp[ep_num].doepctl));
  892. }
  893. }
  894. /* Unmask EP Interrtupt */
  895. writel(readl(&reg->daintmsk)|daintmsk, &reg->daintmsk);
  896. debug("%s: DAINTMSK = 0x%x\n", __func__, readl(&reg->daintmsk));
  897. }
  898. static int dwc2_udc_clear_feature(struct usb_ep *_ep)
  899. {
  900. struct dwc2_udc *dev;
  901. struct dwc2_ep *ep;
  902. u8 ep_num;
  903. ep = container_of(_ep, struct dwc2_ep, ep);
  904. ep_num = ep_index(ep);
  905. dev = ep->dev;
  906. debug_cond(DEBUG_SETUP != 0,
  907. "%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n",
  908. __func__, ep_num, ep_is_in(ep), clear_feature_flag);
  909. if (usb_ctrl->wLength != 0) {
  910. debug_cond(DEBUG_SETUP != 0,
  911. "\tCLEAR_FEATURE: wLength is not zero.....\n");
  912. return 1;
  913. }
  914. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  915. case USB_RECIP_DEVICE:
  916. switch (usb_ctrl->wValue) {
  917. case USB_DEVICE_REMOTE_WAKEUP:
  918. debug_cond(DEBUG_SETUP != 0,
  919. "\tOFF:USB_DEVICE_REMOTE_WAKEUP\n");
  920. break;
  921. case USB_DEVICE_TEST_MODE:
  922. debug_cond(DEBUG_SETUP != 0,
  923. "\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n");
  924. /** @todo Add CLEAR_FEATURE for TEST modes. */
  925. break;
  926. }
  927. dwc2_udc_ep0_zlp(dev);
  928. break;
  929. case USB_RECIP_ENDPOINT:
  930. debug_cond(DEBUG_SETUP != 0,
  931. "\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n",
  932. usb_ctrl->wValue);
  933. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  934. if (ep_num == 0) {
  935. dwc2_udc_ep0_set_stall(ep);
  936. return 0;
  937. }
  938. dwc2_udc_ep0_zlp(dev);
  939. dwc2_udc_ep_clear_stall(ep);
  940. dwc2_udc_ep_activate(ep);
  941. ep->stopped = 0;
  942. clear_feature_num = ep_num;
  943. clear_feature_flag = 1;
  944. }
  945. break;
  946. }
  947. return 0;
  948. }
  949. static int dwc2_udc_set_feature(struct usb_ep *_ep)
  950. {
  951. struct dwc2_udc *dev;
  952. struct dwc2_ep *ep;
  953. u8 ep_num;
  954. ep = container_of(_ep, struct dwc2_ep, ep);
  955. ep_num = ep_index(ep);
  956. dev = ep->dev;
  957. debug_cond(DEBUG_SETUP != 0,
  958. "%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n",
  959. __func__, ep_num);
  960. if (usb_ctrl->wLength != 0) {
  961. debug_cond(DEBUG_SETUP != 0,
  962. "\tSET_FEATURE: wLength is not zero.....\n");
  963. return 1;
  964. }
  965. switch (usb_ctrl->bRequestType & USB_RECIP_MASK) {
  966. case USB_RECIP_DEVICE:
  967. switch (usb_ctrl->wValue) {
  968. case USB_DEVICE_REMOTE_WAKEUP:
  969. debug_cond(DEBUG_SETUP != 0,
  970. "\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n");
  971. break;
  972. case USB_DEVICE_B_HNP_ENABLE:
  973. debug_cond(DEBUG_SETUP != 0,
  974. "\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
  975. break;
  976. case USB_DEVICE_A_HNP_SUPPORT:
  977. /* RH port supports HNP */
  978. debug_cond(DEBUG_SETUP != 0,
  979. "\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n");
  980. break;
  981. case USB_DEVICE_A_ALT_HNP_SUPPORT:
  982. /* other RH port does */
  983. debug_cond(DEBUG_SETUP != 0,
  984. "\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
  985. break;
  986. }
  987. dwc2_udc_ep0_zlp(dev);
  988. return 0;
  989. case USB_RECIP_INTERFACE:
  990. debug_cond(DEBUG_SETUP != 0,
  991. "\tSET_FEATURE: USB_RECIP_INTERFACE\n");
  992. break;
  993. case USB_RECIP_ENDPOINT:
  994. debug_cond(DEBUG_SETUP != 0,
  995. "\tSET_FEATURE: USB_RECIP_ENDPOINT\n");
  996. if (usb_ctrl->wValue == USB_ENDPOINT_HALT) {
  997. if (ep_num == 0) {
  998. dwc2_udc_ep0_set_stall(ep);
  999. return 0;
  1000. }
  1001. ep->stopped = 1;
  1002. dwc2_udc_ep_set_stall(ep);
  1003. }
  1004. dwc2_udc_ep0_zlp(dev);
  1005. return 0;
  1006. }
  1007. return 1;
  1008. }
/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 *
 * Read an 8-byte SETUP packet from the EP0 FIFO, pre-handle the
 * standard requests the UDC can answer itself (SET_ADDRESS,
 * GET_STATUS, SET/CLEAR_FEATURE, ...), and forward everything else
 * to the bound gadget driver's setup() callback.
 */
static void dwc2_ep0_setup(struct dwc2_udc *dev)
{
	struct dwc2_ep *ep = &dev->ep[0];
	int i;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	dwc2_fifo_read(ep, (u32 *)usb_ctrl, 8);

	debug_cond(DEBUG_SETUP != 0,
		   "%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		   "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		   __func__, usb_ctrl->bRequestType,
		   (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		   usb_ctrl->bRequest,
		   usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);

#ifdef DEBUG
	/* hex dump of the raw SETUP packet */
	{
		int i, len = sizeof(*usb_ctrl);
		char *p = (char *)usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	/*
	 * Sanity-check the two mass-storage class requests the driver
	 * knows about; malformed lengths are answered with a stall.
	 */
	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		debug_cond(DEBUG_SETUP != 0,
			   "\t%s:GET_MAX_LUN_REQUEST:invalid",
			   __func__);
		debug_cond(DEBUG_SETUP != 0,
			   "wLength = %d, setup returned\n",
			   usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		   usb_ctrl->wLength != 0) {
		/* Bulk-Only mass storage reset of class-specific request */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:BOT Rest:invalid wLength =%d, setup returned\n",
			   __func__, usb_ctrl->wLength);

		dwc2_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	}

	/* Set direction of EP0 */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;

	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				   __func__, usb_ctrl->wValue);
			/* only the standard device-recipient form is valid */
			if (usb_ctrl->bRequestType
				!= (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;

			udc_set_address(dev, usb_ctrl->wValue);
			return;

		case USB_REQ_SET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "=====================================\n");
			debug_cond(DEBUG_SETUP != 0,
				   "%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE)
				reset_available = 1;

			break;

		case USB_REQ_GET_DESCRIPTOR:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_DESCRIPTOR\n",
				   __func__);
			break;

		case USB_REQ_SET_INTERFACE:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE)
				reset_available = 1;

			break;

		case USB_REQ_GET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_CONFIGURATION\n",
				   __func__);
			break;

		case USB_REQ_GET_STATUS:
			/* returns only when the request was answered here */
			if (!dwc2_udc_get_status(dev, usb_ctrl))
				return;

			break;

		case USB_REQ_CLEAR_FEATURE:
			/* low 7 bits of wIndex select the endpoint */
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!dwc2_udc_clear_feature(&dev->ep[ep_num].ep))
				return;

			break;

		case USB_REQ_SET_FEATURE:
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!dwc2_udc_set_feature(&dev->ep[ep_num].ep))
				return;

			break;

		default:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** Default of usb_ctrl->bRequest=0x%x"
				   "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}

	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:usb_ctrlreq will be passed to fsg_setup()\n",
			    __func__);

		/* drop the lock: the gadget callback may re-enter the UDC */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			/* setup processing failed, force stall */
			dwc2_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;

			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->driver->setup failed (%d),"
				   " bRequest = %d\n",
				   i, usb_ctrl->bRequest);

		} else if (dev->req_pending) {
			dev->req_pending = 0;
			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->req_pending...\n");
		}

		debug_cond(DEBUG_SETUP != 0,
			   "\tep0state = %s\n", state_names[dev->ep0state]);

	}
}
  1157. /*
  1158. * handle ep0 interrupt
  1159. */
  1160. static void dwc2_handle_ep0(struct dwc2_udc *dev)
  1161. {
  1162. if (dev->ep0state == WAIT_FOR_SETUP) {
  1163. debug_cond(DEBUG_OUT_EP != 0,
  1164. "%s: WAIT_FOR_SETUP\n", __func__);
  1165. dwc2_ep0_setup(dev);
  1166. } else {
  1167. debug_cond(DEBUG_OUT_EP != 0,
  1168. "%s: strange state!!(state = %s)\n",
  1169. __func__, state_names[dev->ep0state]);
  1170. }
  1171. }
  1172. static void dwc2_ep0_kick(struct dwc2_udc *dev, struct dwc2_ep *ep)
  1173. {
  1174. debug_cond(DEBUG_EP0 != 0,
  1175. "%s: ep_is_in = %d\n", __func__, ep_is_in(ep));
  1176. if (ep_is_in(ep)) {
  1177. dev->ep0state = DATA_STATE_XMIT;
  1178. dwc2_ep0_write(dev);
  1179. } else {
  1180. dev->ep0state = DATA_STATE_RECV;
  1181. dwc2_ep0_read(dev);
  1182. }
  1183. }