/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */
/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
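
/*
 * Example (illustrative): on a core with five endpoints, ep0 carries all
 * CONTROL traffic, ep1 IN+OUT are reserved for the shared bulk queues, and
 * ep2..ep4 get claimed one-per-qh by interrupt/ISO endpoints until their
 * URB queues drain.
 */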
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{
	return *(struct musb **) hcd->hcd_priv;
}
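
/*
 * Sketch of the registration side (illustrative; it follows the usual
 * usb_create_hcd() pattern, with hcd_priv_size == sizeof(struct musb *)):
 *
 *	struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver, dev,
 *			dev_name(dev));
 *	*(struct musb **) hcd->hcd_priv = musb;
 */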
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);
/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	struct musb	*musb = ep->musb;
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		/* endpoint 0 uses CSR0 and sends a SETUP packet */
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{
	if (is_in != 0 || ep->is_shared_fifo)
		ep->in_qh  = qh;
	if (is_in == 0 || ep->is_shared_fifo)
		ep->out_qh = qh;
}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{
	return is_in ? ep->in_qh : ep->out_qh;
}
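
/*
 * Note: on a shared-FIFO endpoint musb_ep_set_qh() updates in_qh and
 * out_qh together, so musb_ep_set_qh(ep, is_in, NULL) clears both and
 * musb_ep_get_qh() gives the same answer for either direction.
 */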
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	void			*buf = urb->transfer_buffer;
	u32			offset = 0;
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			} s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	musb_ep_set_qh(hw_ep, is_in, qh);
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if (1) {	/* Always assume URB_ISO_ASAP */
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			dev_dbg(musb->controller, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	dev_dbg(musb->controller,
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb->hcd, urb, status);
	spin_lock(&musb->lock);
}
/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				    struct urb *urb)
{
	void __iomem	*epio = qh->hw_ep->regs;
	u16		csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
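
/*
 * Example (illustrative): after three full bulk-IN packets the hardware
 * toggle in RXCSR reads DATA1; saving it via usb_settoggle() lets a later
 * musb_ep_program() on a different hw_ep restore that DATA1 state with
 * H_WR_DATATOGGLE, which matters once bulk qhs share hardware ep1.
 */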
/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, is_in);
	struct musb_hw_ep	*ep = qh->hw_ep;
	int			ready = qh->is_ready;
	int			status;

	status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;
		struct dma_controller	*dma = musb->dma_controller;

		if (is_in) {
			ep->rx_reinit = 1;
			if (ep->rx_channel) {
				dma->channel_release(ep->rx_channel);
				ep->rx_channel = NULL;
			}
		} else {
			ep->tx_reinit = 1;
			if (ep->tx_channel) {
				dma->channel_release(ep->tx_channel);
				ep->tx_channel = NULL;
			}
		}

		/* Clobber old pointers to this qh */
		musb_ep_set_qh(ep, is_in, NULL);
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			head = qh->ring.prev;
			list_del(&qh->ring);
			kfree(qh);
			qh = first_qh(head);
			break;

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}

	if (qh != NULL && qh->is_ready) {
		dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
		    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;
		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
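
/*
 * Example (illustrative): on a 512-byte bulk endpoint a 13-byte packet
 * ends the URB early (rx_count < qh->maxpacket); that counts as success
 * unless the submitter set URB_SHORT_NOT_OK, in which case the code above
 * converts it to -EREMOTEIO.
 */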
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	/* Set RXMAXP with the FIFO size of the endpoint
	 * to disable double buffer mode.
	 */
	if (musb->double_buffer_not_ok)
		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
	else
		musb_writew(ep->regs, MUSB_RXMAXP,
				qh->maxpacket | ((qh->hb_mult - 1) << 11));

	ep->rx_reinit = 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel	*channel = hw_ep->tx_channel;
	void __iomem		*epio = hw_ep->regs;
	u16			pkt_size = qh->maxpacket;
	u16			csr;
	u8			mode;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB;
		/* autoset shouldn't be set in high bandwidth */
		/*
		 * Enable Autoset according to the table below
		 * (derived from the condition that follows):
		 * bulk_split	hb_mult	Autoset_Enable
		 *	any	1	Yes (normal)
		 *	0	>1	No (high-bandwidth ISO)
		 *	1	>1	Yes (bulk split)
		 */
		if (qh->hb_mult == 1 || (qh->hb_mult > 1 &&
					can_bulk_split(hw_ep->musb, qh->type)))
			csr |= MUSB_TXCSR_AUTOSET;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	/*
	 * Ensure the data reaches main memory before starting
	 * DMA.
	 */
	wmb();

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = musb_ep_get_qh(hw_ep, !is_out);
	u16			packet_sz = qh->maxpacket;
	u8			use_dma = 1;
	u16			csr;

	dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	if (is_out && !len) {
		use_dma = 0;
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
		hw_ep->tx_channel = NULL;
	}

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (use_dma && is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb->intrtxe;
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			/*
			 * We could be flushing valid
			 * packets in double buffering
			 * case
			 */
			if (!hw_ep->tx_double_buffered)
				musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (!hw_ep->tx_double_buffered) {
				if (usb_gettoggle(urb->dev, qh->epnum, 1))
					csr |= MUSB_TXCSR_H_WR_DATATOGGLE
						| MUSB_TXCSR_H_DATATOGGLE;
				else
					csr |= MUSB_TXCSR_CLRDATATOG;
			}

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (musb->double_buffer_not_ok) {
				musb_writew(epio, MUSB_TXMAXP,
						hw_ep->max_packet_sz_tx);
			} else if (can_bulk_split(musb, qh->type)) {
				qh->hb_mult = hw_ep->max_packet_sz_tx
					/ packet_sz;
				musb_writew(epio, MUSB_TXMAXP, packet_sz
					| ((qh->hb_mult) - 1) << 11);
			} else {
				musb_writew(epio, MUSB_TXMAXP,
						qh->maxpacket |
						((qh->hb_mult - 1) << 11));
			}
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			if (!buf) {
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						SG_MITER_ATOMIC
						| SG_MITER_FROM_SG);
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller,
							"error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					goto finish;
				}
				buf = qh->sg_miter.addr + urb->sg->offset +
					urb->actual_length;
				load_count = min_t(u32, load_count,
						qh->sg_miter.length);
				musb_write_fifo(hw_ep, load_count, buf);
				qh->sg_miter.consumed = load_count;
				sg_miter_stop(&qh->sg_miter);
			} else
				musb_write_fifo(hw_ep, load_count, buf);
		}
finish:
		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* Candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			/*
			 * Unless caller treats short RX transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(dma_channel,
					packet_sz, !(urb->transfer_flags &
						     URB_SHORT_NOT_OK),
					urb->transfer_dma + offset,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(dma_channel);
				hw_ep->rx_channel = dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr, tx_csr;

	musb_ep_select(mbase, ep->epnum);
	if (is_in) {
		dma = is_dma_capable() ? ep->rx_channel : NULL;

		/* clear nak timeout bit */
		rx_csr = musb_readw(epio, MUSB_RXCSR);
		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
		rx_csr &= ~MUSB_RXCSR_DATAERROR;
		musb_writew(epio, MUSB_RXCSR, rx_csr);

		cur_qh = first_qh(&musb->in_bulk);
	} else {
		dma = is_dma_capable() ? ep->tx_channel : NULL;

		/* clear nak timeout bit */
		tx_csr = musb_readw(epio, MUSB_TXCSR);
		tx_csr |= MUSB_TXCSR_H_WZC_BITS;
		tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
		musb_writew(epio, MUSB_TXCSR, tx_csr);

		cur_qh = first_qh(&musb->out_bulk);
	}
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, is_in, urb);

		if (is_in) {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->in_bulk);

			/* get the next qh from musb->in_bulk */
			next_qh = first_qh(&musb->in_bulk);

			/* set rx_reinit and schedule the next qh */
			ep->rx_reinit = 1;
		} else {
			/* move cur_qh to end of queue */
			list_move_tail(&cur_qh->ring, &musb->out_bulk);

			/* get the next qh from musb->out_bulk */
			next_qh = first_qh(&musb->out_bulk);

			/* set tx_reinit and schedule the next qh */
			ep->tx_reinit = 1;
		}
		musb_start_urb(musb, is_in, next_qh);
	}
}
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			dev_dbg(musb->controller, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			dev_dbg(musb->controller, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			dev_dbg(musb->controller, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr;
	u8			len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		dev_dbg(musb->controller, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		dev_dbg(musb->controller, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		dev_dbg(musb->controller, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* disable ping token in status phase */
			csr |= MUSB_CSR0_H_DIS_PING;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
 */

#endif
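
/*
 * Put differently (sketch of the intended split): mode 1 lets a single
 * channel_program() cover a whole multi-packet URB with the controller
 * setting TxPktRdy per packet, while mode 0 -- and any short final packet
 * in mode 1 -- still needs the CPU to set TxPktRdy explicitly, which is
 * what musb_host_tx() below arranges.
 */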
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			length = 0;
	size_t			offset = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->out_qh;
	struct urb		*urb = next_urb(qh);
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;
	bool			transfer_pending = false;

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
				&& !list_is_singular(&musb->out_bulk)) {
			dev_dbg(musb->controller,
				"NAK timeout on TX%d ep\n", epnum);
			musb_bulk_nak_timeout(musb, hw_ep, 0);
		} else {
			dev_dbg(musb->controller,
				"TX end=%d device not responding\n", epnum);
			/* NOTE: this code path would be a good place to PAUSE a
			 * transfer, if there's some other (nonperiodic) tx urb
			 * that could use this fifo.  (dma complicates it...)
			 * That's already done for bulk RX transfers.
			 *
			 * if (bulk && qh->ring.next != &musb->out_bulk), then
			 * we have a candidate... NAKing is *NOT* an error
			 */
			musb_ep_select(mbase, epnum);
			musb_writew(epio, MUSB_TXCSR,
					MUSB_TXCSR_H_WZC_BITS
					| MUSB_TXCSR_TXPKTRDY);
		}
		return;
	}

done:
	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			} else {
				tx_csr &= ~(MUSB_TXCSR_DMAMODE |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma && urb->transfer_buffer_length == qh->offset) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
				transfer_pending = true;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length)) {
			if (is_cppi_enabled() || tusb_dma_omap())
				musb_h_tx_dma_start(hw_ep);
			return;
		}
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	/* Unmap the buffer so that CPU can use it */
	usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

	/*
	 * We need to map sg if the transfer_buffer is
	 * NULL.
	 */
	if (!urb->transfer_buffer)
		qh->use_sg = true;

	if (qh->use_sg) {
		/* sg_miter_start is already done in musb_ep_program */
		if (!sg_miter_next(&qh->sg_miter)) {
			dev_err(musb->controller, "error: sg list empty\n");
			sg_miter_stop(&qh->sg_miter);
			status = -EINVAL;
			goto done;
		}
		urb->transfer_buffer = qh->sg_miter.addr;
		length = min_t(u32, length, qh->sg_miter.length);
		musb_write_fifo(hw_ep, length, urb->transfer_buffer);
		qh->sg_miter.consumed = length;
		sg_miter_stop(&qh->sg_miter);
	} else {
		musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	}

	qh->segsize = length;

	if (qh->use_sg) {
		if (offset + length >= urb->transfer_buffer_length)
			qh->use_sg = false;
	}

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|		/  \
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
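
/*
 * Cost of staying in mode 0 (illustrative arithmetic): each max-packet
 * transfer takes one channel_program() call and one DMA completion irq,
 * so a 64 KB bulk URB at 512 bytes/packet costs 128 interrupts; mode 1
 * would batch those, which is why usb-storage is the tempting candidate
 * described above.
 */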
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		dev_dbg(musb->controller, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		dev_dbg(musb->controller, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_nak_timeout(musb, hw_ep, 1);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	} else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
		dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
				epnum);
		status = -EPROTO;
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA)
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is received */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

/* SCRUB (RX) */
			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n",
					epnum, rx_count,
					(unsigned long long) urb->transfer_dma
					+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int d_status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					d_status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (d_status == 0) {
						d_status = -EOVERFLOW;
						urb->error_count++;
					}
					dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = d_status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is received too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is received just a little late so that if
 *	you try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the packet is received. Well, you won't get
 *	any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_DMAENAB;

			/* autoclear shouldn't be set in high bandwidth */
			if (qh->hb_mult == 1)
				val |= MUSB_RXCSR_AUTOCLEAR;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				val = musb_readw(epio, MUSB_RXCSR);
				val &= ~(MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_AUTOREQ
					| MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, val);
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			unsigned int received_len;

			/* Unmap the buffer so that CPU can use it */
			usb_hcd_unmap_urb_for_dma(musb->hcd, urb);

			/*
			 * We need to map sg if the transfer_buffer is
			 * NULL.
			 */
			if (!urb->transfer_buffer) {
				qh->use_sg = true;
				sg_miter_start(&qh->sg_miter, urb->sg, 1,
						sg_flags);
			}

			if (qh->use_sg) {
				if (!sg_miter_next(&qh->sg_miter)) {
					dev_err(musb->controller, "error: sg list empty\n");
					sg_miter_stop(&qh->sg_miter);
					status = -EINVAL;
					done = true;
					goto finish;
				}
				urb->transfer_buffer = qh->sg_miter.addr;
				received_len = urb->actual_length;
				qh->offset = 0x0;
				done = musb_host_packet_rx(musb, urb, epnum,
						iso_err);
				/* Calculate the number of bytes received */
				received_len = urb->actual_length -
					received_len;
				qh->sg_miter.consumed = received_len;
				sg_miter_stop(&qh->sg_miter);
			} else {
				done = musb_host_packet_rx(musb, urb,
						epnum, iso_err);
			}
			dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (qh->use_sg)
			qh->use_sg = false;

		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle = 0;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;
	u8			toggle;
	u8			txtype;
	struct urb		*urb = next_urb(qh);

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (musb_ep_get_qh(hw_ep, is_in) != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx;
		else
			diff = hw_ep->max_packet_sz_tx;
		diff -= (qh->maxpacket * qh->hb_mult);

		if (diff >= 0 && best_diff > diff) {

			/*
			 * Mentor controller has a bug in that if we schedule
			 * a BULK Tx transfer on an endpoint that had earlier
			 * handled ISOC then the BULK transfer has to start on
			 * a zero toggle.  If the BULK transfer starts on a 1
			 * toggle then this transfer will fail as the mentor
			 * controller starts the Bulk transfer on a 0 toggle
			 * irrespective of the programming of the toggle bits
			 * in the TXCSR register.  Check for this condition
			 * while allocating the EP for a Tx Bulk transfer.  If
			 * so skip this EP.
			 */
			hw_ep = musb->endpoints + epnum;
			toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
			txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
					>> 4) & 0x3;
			if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
				toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
				continue;

			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX/TX NAK timeout scheme when bulk requests are
		 * multiplexed. This scheme does not work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
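
/*
 * The 8/4 NAK-timeout values chosen in musb_schedule() above follow the
 * logarithmic NAKlimit encoding, timeout = 2^(n-1) frame units: 8 ->
 * 2^7 = 128 microframes (16 ms) at high speed, 4 -> 2^3 = 8 frames
 * (8 ms) at full speed.
 */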
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = usb_endpoint_maxp(epd);
	qh->type = usb_endpoint_type(epd);

	/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
	 * Some musb cores don't support high bandwidth ISO transfers; and
	 * we don't (yet!) support high bandwidth interrupt transfers.
	 */
	qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03);
	if (qh->hb_mult > 1) {
		int ok = (qh->type == USB_ENDPOINT_XFER_ISOC);

		if (ok)
			ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx)
				|| (usb_pipeout(urb->pipe) && musb->hb_iso_tx);
		if (!ok) {
			ret = -EMSGSIZE;
			goto done;
		}
		qh->maxpacket &= 0x7ff;
	}
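
	/*
	 * Example (illustrative): a high-bandwidth ISO endpoint advertising
	 * wMaxPacketSize 0x1400 decodes to hb_mult = 3 and maxpacket = 0x400,
	 * i.e. up to three 1024-byte transactions per microframe.
	 */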

	qh->epnum = usb_endpoint_num(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
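		/*
		 * Worked example (illustrative): a high-speed interrupt
		 * endpoint with bInterval 4 polls every 2^(4-1) = 8
		 * microframes (1 ms); a full-speed one with bInterval 10
		 * keeps the linear value, 10 frames (10 ms).
		 */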
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv || !next_urb(qh)) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		qh = NULL;
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * Abort a transfer that's at the head of a hardware queue.
 * Called with controller locked, irqs blocked.
 * That hardware queue then advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	int			is_in = usb_pipein(urb->pipe);
	int			status = 0;
	u16			csr;

	/* select this endpoint's indexed CSR bank before touching it */
	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			dev_dbg(musb->controller,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (ep->epnum && is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
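
/* A failed channel_abort() leaves status nonzero, which skips the
 * musb_advance_schedule() call above: the qh stays at the head of the
 * hardware queue and the error propagates to the caller.
 */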

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	unsigned long		flags;
	int			is_in  = usb_pipein(urb->pipe);
	int			ret;

	dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			is_in ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/*
	 * Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending DMA, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 *
	 * NOTE: qh is invalid unless !list_empty(&hep->urb_list)
	 */
	if (!qh->is_ready
			|| urb->urb_list.prev != &qh->hep->urb_list
			|| musb_ep_get_qh(qh->hw_ep, is_in) != qh) {
		int	ready = qh->is_ready;

		qh->is_ready = 0;
		musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			is_in = hep->desc.bEndpointAddress & USB_DIR_IN;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct urb		*urb;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* Kick the first URB off the hardware, if needed */
	qh->is_ready = 0;
	if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);
	u8		devctl;

	musb_port_suspend(musb, true);

	if (!is_host_active(musb))
		return 0;

	switch (musb->xceiv->state) {
	case OTG_STATE_A_SUSPEND:
		return 0;
	case OTG_STATE_A_WAIT_VRISE:
		/* ID could be grounded even if there's no device
		 * on the other end of the cable.  NOTE that the
		 * A_WAIT_VRISE timers are messy with MUSB...
		 */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		/* both VBUS level bits set means "above VBus valid" */
		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
			musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
		break;
	default:
		break;
	}

	if (musb->is_active) {
		WARNING("trying to suspend as %s while active\n",
				usb_otg_state_string(musb->xceiv->state));
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	if (musb->config &&
	    musb->config->host_port_deassert_reset_at_resume)
		musb_port_reset(musb, false);

	return 0;
}

#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[0];
};
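
/* The flexible data[] member is the bounce buffer the DMA engine sees;
 * kmalloc_ptr remembers the raw allocation (which may start before the
 * aligned struct) and old_xfer_buffer the caller's original, possibly
 * misaligned, transfer buffer.
 */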

static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		memcpy(temp->old_xfer_buffer, temp->data,
		       urb->transfer_buffer_length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}

static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct musb_temp_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);

	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}
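
/* Example: were kmalloc to return 0x1002 (hypothetical; real allocations
 * are already more aligned than this), PTR_ALIGN would place the struct
 * at 0x1004.  sizeof(struct musb_temp_buffer) is a multiple of 4 on both
 * 32 and 64 bit builds, so data[] inherits the 4 byte alignment, and the
 * MUSB_USB_DMA_ALIGN - 1 bytes of padding keep the shifted struct plus
 * payload inside the allocation.
 */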

static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				      gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int ret;

	/*
	 * The DMA engine in RTL 1.8 and above cannot handle
	 * DMA addresses that are not aligned to a 4 byte boundary.
	 * For such engines we implement the (un)map_urb_for_dma hooks.
	 * Do not use these hooks for RTL < 1.8.
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}

static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL < 1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}

#endif /* !CONFIG_MUSB_PIO_ONLY */
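
/* usbcore calls map_urb_for_dma when an URB is submitted and
 * unmap_urb_for_dma when it's given back, so a bounce buffer set up
 * above lives exactly as long as the URB it serves; both hooks are
 * wired into the hc_driver methods below.
 */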

static const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb *),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

#ifndef CONFIG_MUSB_PIO_ONLY
	.map_urb_for_dma	= musb_map_urb_for_dma,
	.unmap_urb_for_dma	= musb_unmap_urb_for_dma,
#endif

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};

int musb_host_alloc(struct musb *musb)
{
	struct device	*dev = musb->controller;

	/* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
	musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
	if (!musb->hcd)
		return -EINVAL;

	*musb->hcd->hcd_priv = (unsigned long) musb;
	musb->hcd->self.uses_pio_for_control = 1;
	musb->hcd->uses_new_polling = 1;
	musb->hcd->has_tt = 1;

	return 0;
}

void musb_host_cleanup(struct musb *musb)
{
	if (musb->port_mode == MUSB_PORT_MODE_GADGET)
		return;
	usb_remove_hcd(musb->hcd);
	musb->hcd = NULL;
}

void musb_host_free(struct musb *musb)
{
	usb_put_hcd(musb->hcd);
}

int musb_host_setup(struct musb *musb, int power_budget)
{
	int ret;
	struct usb_hcd *hcd = musb->hcd;

	MUSB_HST_MODE(musb);
	musb->xceiv->otg->default_a = 1;
	musb->xceiv->state = OTG_STATE_A_IDLE;

	otg_set_host(musb->xceiv->otg, &hcd->self);
	hcd->self.otg_port = 1;
	musb->xceiv->otg->host = &hcd->self;
	hcd->power_budget = 2 * (power_budget ? : 250);
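	/* power_budget arrives in the 2 mA units of bMaxPower; the GNU
	 * "?:" shorthand defaults an unset (zero) budget to 250 units,
	 * so usbcore sees a standard 500 mA root port by default.
	 */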

	ret = usb_add_hcd(hcd, 0, 0);
	if (ret < 0)
		return ret;

	device_wakeup_enable(hcd->self.controller);
	return 0;
}

void musb_host_resume_root_hub(struct musb *musb)
{
	usb_hcd_resume_root_hub(musb->hcd);
}

void musb_host_poke_root_hub(struct musb *musb)
{
	MUSB_HST_MODE(musb);
	if (musb->hcd->status_urb)
		usb_hcd_poll_rh_status(musb->hcd);
	else
		usb_hcd_resume_root_hub(musb->hcd);
}