/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
#include "musb_trace.h"
/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))
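/*
 * Each musb_request tracks its DMA mapping state in req->map_state (see
 * musb_gadget.h): UN_MAPPED means this driver owns no mapping,
 * MUSB_MAPPED means map_dma_buffer() created the mapping and must undo
 * it, and PRE_MAPPED means the gadget driver handed us an already
 * mapped buffer that we only sync, never unmap.
 */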
/* Map the buffer for DMA */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dma_addr_t dma_addr;
		int ret;

		dma_addr = dma_map_single(musb->controller,
				request->request.buf,
				request->request.length,
				request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;

		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
				request->request.dma,
				request->request.length,
				request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}
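/*
 * Illustrative sketch (not part of this driver): a DMA glue layer that
 * cannot handle unaligned buffers could veto them through the optional
 * ->is_compatible() hook checked above, roughly like this:
 *
 *	static int example_is_compatible(struct dma_channel *channel,
 *			u16 maxpacket, void *buf, u32 length)
 *	{
 *		return IS_ALIGNED((unsigned long)buf, 4);
 *	}
 */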
/* Unmap the buffer from dma and map it back to the cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
				request->request.dma,
				request->request.length,
				request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
				request->request.dma,
				request->request.length,
				request->tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	trace_musb_req_gb(req);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
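/*
 * Note that musb_g_giveback() drops musb->lock around the completion
 * callback; any caller that keeps touching indexed endpoint registers
 * afterwards must re-run musb_ep_select(), since the callback may queue
 * new requests and change the INDEX register.
 */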
/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * Caller must have locked the controller, blocked IRQs, and selected
 * this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}
/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}
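/*
 * Worked example (sizes assumed for illustration): for a bulk IN
 * endpoint with packet_sz 512 backed by a 1024-byte TX FIFO,
 * can_bulk_split() lets us load two 512-byte packets per FIFO write,
 * so this returns 1024; without bulk splitting it returns the plain
 * 512-byte wMaxPacketSize.
 */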
/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "dma pending...");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		musb_dbg(musb, "%s old packet still ready, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual,
					request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
					    can_bulk_split(musb,
							   musb_ep->type))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
				MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 *
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: request->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}
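/*
 * Background on desired_mode above, stated loosely: Mentor DMA mode 0
 * moves at most one USB packet per channel program, with software
 * setting TXPKTRDY each time, while mode 1 streams a whole multi-packet
 * buffer with hardware (AUTOSET) packet handshaking. That is why
 * txstate() only picks mode 1 when at least one full packet
 * (request_size >= packet_sz) is queued.
 */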
/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	trace_musb_req_tx(req);
	csr = musb_readw(epio, MUSB_TXCSR);
	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |= MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;
		bool	short_packet = false;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
			short_packet = true;

		if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
			(is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1)))))
			short_packet = true;

		if (short_packet) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				musb_dbg(musb, "%s idle now",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}
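/*
 * Worked example of the short_packet path above (sizes assumed): a
 * 1024-byte request with request->zero set on a 512-byte endpoint ends
 * exactly on a packet boundary, so after the last full packet
 * musb_g_tx() sets TXPKTRDY once more with an empty FIFO to emit the
 * terminating ZLP before the request is given back.
 */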
/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "DMA pending...");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, RXCSR %04x",
				musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */
		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
			if (!is_buffer_mapped(req))
				goto buffer_aint_mapped;

			if (musb_dma_inventra(musb)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of the transfer is signified either
	 * by a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get a DMA completion interrupt
	 * for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}

			if ((musb_dma_ux500(musb)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))
					return;
			}

			len = request->length - request->actual;
			musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

			if (tusb_dma_omap(musb)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}

			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			unmap_dma_buffer(req, musb);

			/*
			 * Clear DMAENAB and AUTOCLEAR for the
			 * PIO mode transfer
			 */
			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
			musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}
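/*
 * Completion example for the test above (sizes assumed): with
 * packet_sz 512 and request->length 4096, a host sending 512 + 512 +
 * 100 bytes completes the request at actual == 1124, because the
 * 100-byte short packet terminates the transfer even though the
 * buffer is not full.
 */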
/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	trace_musb_req_rx(req);
	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		musb_dbg(musb, "%s busy, csr %04x",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}
/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem		*mbase;
	u8			epnum;
	u16			csr;
	unsigned		tmp;
	int			status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp_mult(desc) - 1;
	if (tmp) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			musb_dbg(musb, "no support for high bandwidth ISO");
			goto fail;
		}
		musb_ep->hb_mult = tmp;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = usb_endpoint_maxp(desc);
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok) {
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		} else {
			if (can_bulk_split(musb, musb_ep->type))
				musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
							musb_ep->packet_sz) - 1;
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));
		}

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			} s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_delayed_work(&musb->irq_work, 0);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	schedule_delayed_work(&musb->irq_work, 0);

	spin_unlock_irqrestore(&(musb->lock), flags);

	musb_dbg(musb, "%s", musb_ep->end_point.name);

	return 0;
}
/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request)
		return NULL;

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	trace_musb_req_alloc(request);
	return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	trace_musb_req_free(request);
	kfree(request);
}
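/*
 * These two ops back usb_ep_alloc_request()/usb_ep_free_request(); a
 * gadget driver on top of this UDC would typically use them like this
 * (illustrative sketch, names assumed):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete_fn;
 *	usb_ep_queue(ep, req, GFP_KERNEL);
 */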
static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	trace_musb_req_start(req);
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
	struct musb_request *req = data;

	musb_ep_restart(musb, req);

	return 0;
}
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	status = pm_runtime_get(musb->controller);
	if ((status != -EINPROGRESS) && status < 0) {
		dev_err(musb->controller,
			"pm runtime get failed in %s\n",
			__func__);
		pm_runtime_put_noidle(musb->controller);

		return status;
	}
	status = 0;

	trace_musb_req_enq(request);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		musb_dbg(musb, "req %p queued to %s while ep %s",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
		status = musb_queue_resume_work(musb,
						musb_ep_restart_resume_work,
						request);
		if (status < 0)
			dev_err(musb->controller, "%s resume work: %i\n",
				__func__, status);
	}

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return status;
}
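/*
 * Design note: the head-of-queue kick above goes through
 * musb_queue_resume_work() rather than calling musb_ep_restart()
 * directly, so a request queued while the controller is runtime
 * suspended is only started once the core has resumed the hardware.
 */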
static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || req->ep != musb_ep)
		return -EINVAL;

	trace_musb_req_deq(req);

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_err(musb->controller, "request %p not queued to %s\n",
				request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			musb_dbg(musb, "request in progress, cannot halt %s",
					ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				musb_dbg(musb, "FIFO busy, cannot halt %s",
						ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		musb_dbg(musb, "restarting the request");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
/*
 * Sets the halt feature, with clear-halt requests ignored.
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already-loaded packets.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
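/*
 * Gadget drivers never call the functions above directly; the gadget
 * core dispatches through this ops table, so e.g. usb_ep_enable() lands
 * in musb_gadget_enable() and usb_ep_queue() in musb_gadget_queue().
 */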
/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		spin_unlock_irqrestore(&musb->lock, flags);
		otg_start_srp(musb->xceiv->otg);
		spin_lock_irqsave(&musb->lock, flags);

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		musb_dbg(musb, "Unhandled wake: %s",
			usb_otg_state_string(musb->xceiv->otg->state));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	musb_dbg(musb, "issue wakeup");

	/* FIXME do this next chunk in a timer callback, no udelay */
	msleep(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	gadget->is_selfpowered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	musb_dbg(musb, "gadget D+ pullup %s",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	musb_dbg(musb, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	if (!musb->xceiv->set_power)
		return -EOPNOTSUPP;
	return usb_phy_set_power(musb->xceiv, mA);
}

static void musb_gadget_work(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, gadget_work.work);
	pm_runtime_get_sync(musb->controller);
	spin_lock_irqsave(&musb->lock, flags);
	musb_pullup(musb, musb->softconnect);
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		schedule_delayed_work(&musb->gadget_work, 0);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}

#ifdef CONFIG_BLACKFIN
static struct usb_ep *musb_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct usb_ep *ep = NULL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_BULK:
		if (usb_endpoint_dir_in(desc))
			ep = gadget_find_ep_by_name(g, "ep5in");
		else
			ep = gadget_find_ep_by_name(g, "ep6out");
		break;
	case USB_ENDPOINT_XFER_INT:
		if (usb_endpoint_dir_in(desc))
			ep = gadget_find_ep_by_name(g, "ep1in");
		else
			ep = gadget_find_ep_by_name(g, "ep2out");
		break;
	default:
		break;
	}

	if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
		return ep;

	return NULL;
}
#else
#define musb_match_ep NULL
#endif
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int musb_gadget_stop(struct usb_gadget *g);

static const struct usb_gadget_ops musb_gadget_operations = {
	.get_frame		= musb_gadget_get_frame,
	.wakeup			= musb_gadget_wakeup,
	.set_selfpowered	= musb_gadget_set_self_powered,
	/* .vbus_session	= musb_gadget_vbus_session, */
	.vbus_draw		= musb_gadget_vbus_draw,
	.pullup			= musb_gadget_pullup,
	.udc_start		= musb_gadget_start,
	.udc_stop		= musb_gadget_stop,
	.match_ep		= musb_match_ep,
};
/* ----------------------------------------------------------------------- */

/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port. It assumes
 * all peripheral ports are external...
 */

static void
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
{
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;

	memset(ep, 0, sizeof *ep);

	ep->current_epnum = epnum;
	ep->musb = musb;
	ep->hw_ep = hw_ep;
	ep->is_in = is_in;

	INIT_LIST_HEAD(&ep->req_list);

	sprintf(ep->name, "ep%d%s", epnum,
			(!epnum || hw_ep->is_shared_fifo) ? "" : (
				is_in ? "in" : "out"));
	ep->end_point.name = ep->name;
	INIT_LIST_HEAD(&ep->end_point.ep_list);
	if (!epnum) {
		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
		ep->end_point.caps.type_control = true;
		ep->end_point.ops = &musb_g_ep0_ops;
		musb->g.ep0 = &ep->end_point;
	} else {
		if (is_in)
			usb_ep_set_maxpacket_limit(&ep->end_point,
						hw_ep->max_packet_sz_tx);
		else
			usb_ep_set_maxpacket_limit(&ep->end_point,
						hw_ep->max_packet_sz_rx);
		ep->end_point.caps.type_iso = true;
		ep->end_point.caps.type_bulk = true;
		ep->end_point.caps.type_int = true;
		ep->end_point.ops = &musb_ep_ops;
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
	}

	if (!epnum || hw_ep->is_shared_fifo) {
		ep->end_point.caps.dir_in = true;
		ep->end_point.caps.dir_out = true;
	} else if (is_in)
		ep->end_point.caps.dir_in = true;
	else
		ep->end_point.caps.dir_out = true;
}
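/*
 * Naming example implied by the sprintf() above: endpoint 0 is always
 * "ep0"; a dedicated-FIFO endpoint 1 shows up as "ep1in" or "ep1out",
 * while a shared-FIFO endpoint 3 is plain "ep3" because one FIFO
 * serves both directions.
 */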
/*
 * Initialize the endpoints exposed to peripheral drivers, with backlinks
 * to the rest of the driver state.
 */
static inline void musb_g_init_endpoints(struct musb *musb)
{
	u8			epnum;
	struct musb_hw_ep	*hw_ep;
	unsigned		count = 0;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
			count++;
		} else {
			if (hw_ep->max_packet_sz_tx) {
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
				count++;
			}
		}
	}
}
/* called once during driver setup to initialize and link into
 * the driver model; memory is zeroed.
 */
int musb_gadget_setup(struct musb *musb)
{
	int status;

	/* REVISIT minor race: if (erroneously) setting up two
	 * musb peripherals at the same time, only the bus lock
	 * is probably held.
	 */

	musb->g.ops = &musb_gadget_operations;
	musb->g.max_speed = USB_SPEED_HIGH;
	musb->g.speed = USB_SPEED_UNKNOWN;

	MUSB_DEV_MODE(musb);
	musb->xceiv->otg->default_a = 0;
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;

	/* this "gadget" abstracts/virtualizes the controller */
	musb->g.name = musb_driver_name;
#if IS_ENABLED(CONFIG_USB_MUSB_DUAL_ROLE)
	musb->g.is_otg = 1;
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
	musb->g.is_otg = 0;
#endif
	INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
	musb_g_init_endpoints(musb);

	musb->is_active = 0;
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
	if (status)
		goto err;

	return 0;
err:
	musb->g.dev.parent = NULL;
	device_unregister(&musb->g.dev);
	return status;
}

void musb_gadget_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_HOST)
		return;

	cancel_delayed_work_sync(&musb->gadget_work);
	usb_del_gadget_udc(&musb->g);
}
/*
 * Register the gadget driver. Used by gadget drivers when
 * registering themselves with the controller.
 *
 * -EINVAL something went wrong (not driver)
 * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
 *
 * @param driver the gadget driver
 * @return <0 if error, 0 if everything is fine
 */
static int musb_gadget_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct musb		*musb = gadget_to_musb(g);
	struct usb_otg		*otg = musb->xceiv->otg;
	unsigned long		flags;
	int			retval = 0;

	if (driver->max_speed < USB_SPEED_HIGH) {
		retval = -EINVAL;
		goto err;
	}

	pm_runtime_get_sync(musb->controller);

	musb->softconnect = 0;
	musb->gadget_driver = driver;

	spin_lock_irqsave(&musb->lock, flags);
	musb->is_active = 1;

	otg_set_peripheral(otg, &musb->g);
	musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	spin_unlock_irqrestore(&musb->lock, flags);

	musb_start(musb);

	/* REVISIT: funcall to other code, which also
	 * handles power budgeting ... this way also
	 * ensures HdrcStart is indirectly called.
	 */
	if (musb->xceiv->last_event == USB_EVENT_ID)
		musb_platform_set_vbus(musb, 1);

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

err:
	return retval;
}
/*
 * Unregister the gadget driver. Used by gadget drivers when
 * unregistering themselves from the controller.
 *
 * @param driver the gadget driver to unregister
 */
static int musb_gadget_stop(struct usb_gadget *g)
{
	struct musb	*musb = gadget_to_musb(g);
	unsigned long	flags;

	pm_runtime_get_sync(musb->controller);

	/*
	 * REVISIT always use otg_set_peripheral() here too;
	 * this needs to shut down the OTG engine.
	 */

	spin_lock_irqsave(&musb->lock, flags);

	musb_hnp_stop(musb);

	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->xceiv->otg->state = OTG_STATE_UNDEFINED;
	musb_stop(musb);
	otg_set_peripheral(musb->xceiv->otg, NULL);

	musb->is_active = 0;
	musb->gadget_driver = NULL;
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);

	/*
	 * FIXME we need to be able to register another
	 * gadget driver here and have everything work;
	 * that currently misbehaves.
	 */

	/* Force check of devctl register for PM runtime */
	schedule_delayed_work(&musb->irq_work, 0);

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;
}
/* ----------------------------------------------------------------------- */

/* lifecycle operations called through plat_uds.c */

void musb_g_resume(struct musb *musb)
{
	musb->is_suspended = 0;
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_IDLE:
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_PERIPHERAL:
		musb->is_active = 1;
		if (musb->gadget_driver && musb->gadget_driver->resume) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		WARNING("unhandled RESUME transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
	}
}
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
	u8	devctl;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_IDLE:
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		break;
	case OTG_STATE_B_PERIPHERAL:
		musb->is_suspended = 1;
		if (musb->gadget_driver && musb->gadget_driver->suspend) {
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
		}
		break;
	default:
		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
		 * A_PERIPHERAL may need care too
		 */
		WARNING("unhandled SUSPEND transition (%s)",
				usb_otg_state_string(musb->xceiv->otg->state));
	}
}
/* Called during SRP */
void musb_g_wakeup(struct musb *musb)
{
	musb_gadget_wakeup(&musb->g);
}

/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
	void __iomem	*mregs = musb->mregs;
	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);

	musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);

	/* clear HR */
	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);

	/* don't draw vbus until new b-default session */
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb->g.speed = USB_SPEED_UNKNOWN;
	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
	}

	switch (musb->xceiv->otg->state) {
	default:
		musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_A_PERIPHERAL:
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
		MUSB_HST_MODE(musb);
		break;
	case OTG_STATE_B_WAIT_ACON:
	case OTG_STATE_B_HOST:
	case OTG_STATE_B_PERIPHERAL:
	case OTG_STATE_B_IDLE:
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
		break;
	case OTG_STATE_B_SRP_INIT:
		break;
	}

	musb->is_active = 0;
}
void musb_g_reset(struct musb *musb)
__releases(musb->lock)
__acquires(musb->lock)
{
	void __iomem	*mbase = musb->mregs;
	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
	u8		power;

	musb_dbg(musb, "<== %s driver '%s'",
			(devctl & MUSB_DEVCTL_BDEVICE)
				? "B-Device" : "A-Device",
			musb->gadget_driver
				? musb->gadget_driver->driver.name
				: NULL
			);

	/* report reset, if we didn't already (flushing EP state) */
	if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) {
		spin_unlock(&musb->lock);
		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
		spin_lock(&musb->lock);
	}

	/* clear HR */
	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

	/* what speed did we negotiate? */
	power = musb_readb(mbase, MUSB_POWER);
	musb->g.speed = (power & MUSB_POWER_HSMODE)
			? USB_SPEED_HIGH : USB_SPEED_FULL;

	/* start in USB_STATE_DEFAULT */
	musb->is_active = 1;
	musb->is_suspended = 0;
	MUSB_DEV_MODE(musb);
	musb->address = 0;
	musb->ep0_state = MUSB_EP0_STAGE_SETUP;

	musb->may_wakeup = 0;
	musb->g.b_hnp_enable = 0;
	musb->g.a_alt_hnp_support = 0;
	musb->g.a_hnp_support = 0;
	musb->g.quirk_zlp_not_supp = 1;

	/* Normal reset, as B-Device;
	 * or else after HNP, as A-Device
	 */
	if (!musb->g.is_otg) {
		/* USB device controllers that are not OTG compatible
		 * may not have DEVCTL register in silicon.
		 * In that case, do not rely on devctl for setting
		 * peripheral mode.
		 */
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else if (devctl & MUSB_DEVCTL_BDEVICE) {
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->g.is_a_peripheral = 0;
	} else {
		musb->xceiv->otg->state = OTG_STATE_A_PERIPHERAL;
		musb->g.is_a_peripheral = 1;
	}

	/* start with default limits on VBUS power draw */
	(void) musb_gadget_vbus_draw(&musb->g, 8);
}