#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE		0
#define EP_MODE_AUTOREQ_ALL_NEOP	1
#define EP_MODE_AUTOREQ_ALWAYS		3

#define EP_MODE_DMA_TRANSPARENT		0
#define EP_MODE_DMA_RNDIS		1
#define EP_MODE_DMA_GEN_RNDIS		3

#define USB_CTRL_TX_MODE	0x70
#define USB_CTRL_RX_MODE	0x74
#define USB_CTRL_AUTOREQ	0xd0
#define USB_TDOWN		0xd8

#define MUSB_DMA_NUM_CHANNELS 15
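/*
 * Per-controller state: each of the 15 hardware endpoints may own one
 * RX and one TX CPPI 4.1 channel. early_tx/early_tx_list drive the
 * "TX interrupt fires before the FIFO drains" workaround below.
 */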
struct cppi41_dma_controller {
	struct dma_controller controller;
	struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
	struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
	struct hrtimer early_tx;
	struct list_head early_tx_list;
	u32 rx_mode;
	u32 tx_mode;
	u32 auto_req;
};
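/*
 * Host-side RX data-toggle bookkeeping for AM335x Advisory 1.0.13:
 * save_rx_toggle() latches the toggle before a transfer is programmed,
 * update_rx_toggle() restores DATA1 if the controller spuriously reset
 * it to DATA0 while receiving on more than one endpoint.
 */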
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(cppi41_channel->controller->controller.musb))
		return;
	csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
	cppi41_channel->usb_toggle = toggle;
}
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	u16 csr;
	u8 toggle;

	if (cppi41_channel->is_tx)
		return;
	if (!is_host_active(musb))
		return;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

	/*
	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error
	 * the data toggle may reset from DATA1 to DATA0 while receiving
	 * data from more than one endpoint.
	 */
	if (!toggle && toggle == cppi41_channel->usb_toggle) {
		csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
		musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
		musb_dbg(musb, "Restoring DATA1 toggle.");
	}

	cppi41_channel->usb_toggle = toggle;
}
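/* TXPKTRDY still set means the TX FIFO has not yet drained to the wire. */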
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
	u8 epnum = hw_ep->epnum;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = musb->endpoints[epnum].regs;
	u16 csr;

	musb_ep_select(musb->mregs, hw_ep->epnum);
	csr = musb_readw(epio, MUSB_TXCSR);
	if (csr & MUSB_TXCSR_TXPKTRDY)
		return false;
	return true;
}
static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result);
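/*
 * Completion path: either report the finished transfer to the musb core
 * or, for a transfer that is split into single packets, reprogram the
 * dmaengine for the next packet.
 */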
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	u16 csr;

	if (!cppi41_channel->prog_len ||
	    (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

		/* done, complete */
		cppi41_channel->channel.actual_len =
			cppi41_channel->transferred;
		cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
		cppi41_channel->channel.rx_packet_done = true;

		/*
		 * transmit ZLP using PIO mode for transfers whose size is
		 * a multiple of the EP packet size.
		 */
		if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
					cppi41_channel->packet_sz) == 0) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
		}

		trace_musb_cppi41_done(cppi41_channel);
		musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
	} else {
		/* next iteration, reload */
		struct dma_chan *dc = cppi41_channel->dc;
		struct dma_async_tx_descriptor *dma_desc;
		enum dma_transfer_direction direction;
		u32 remain_bytes;

		cppi41_channel->buf_addr += cppi41_channel->packet_sz;

		remain_bytes = cppi41_channel->total_len;
		remain_bytes -= cppi41_channel->transferred;
		remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
		cppi41_channel->prog_len = remain_bytes;

		direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
			: DMA_DEV_TO_MEM;
		dma_desc = dmaengine_prep_slave_single(dc,
				cppi41_channel->buf_addr,
				remain_bytes,
				direction,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (WARN_ON(!dma_desc))
			return;

		dma_desc->callback_result = cppi41_dma_callback;
		dma_desc->callback_param = &cppi41_channel->channel;
		cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
		trace_musb_cppi41_cont(cppi41_channel);
		dma_async_issue_pending(dc);

		if (!cppi41_channel->is_tx) {
			musb_ep_select(musb->mregs, hw_ep->epnum);
			csr = musb_readw(epio, MUSB_RXCSR);
			csr |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}
}
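/*
 * hrtimer callback for the full-speed variant of the early-TX-interrupt
 * workaround: re-check each queued channel's TX FIFO, complete those
 * that have drained and re-arm in 20us steps while work remains.
 */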
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
	struct cppi41_dma_controller *controller;
	struct cppi41_dma_channel *cppi41_channel, *n;
	struct musb *musb;
	unsigned long flags;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	controller = container_of(timer, struct cppi41_dma_controller,
			early_tx);
	musb = controller->controller.musb;

	spin_lock_irqsave(&musb->lock, flags);
	list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
			tx_check) {
		bool empty;
		struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

		empty = musb_is_tx_fifo_empty(hw_ep);
		if (empty) {
			list_del_init(&cppi41_channel->tx_check);
			cppi41_trans_done(cppi41_channel);
		}
	}

	if (!list_empty(&controller->early_tx_list) &&
	    !hrtimer_is_queued(&controller->early_tx)) {
		ret = HRTIMER_RESTART;
		hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
	}

	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
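/*
 * dmaengine completion callback: accounts the transferred bytes under
 * musb->lock, applies the RX data-toggle fixup and decides whether the
 * transfer can complete now or must wait for the TX FIFO to drain.
 */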
static void cppi41_dma_callback(void *private_data,
				const struct dmaengine_result *result)
{
	struct dma_channel *channel = private_data;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
	struct cppi41_dma_controller *controller;
	struct musb *musb = hw_ep->musb;
	unsigned long flags;
	struct dma_tx_state txstate;
	u32 transferred;
	int is_hs = 0;
	bool empty;

	controller = cppi41_channel->controller;
	if (controller->controller.dma_callback)
		controller->controller.dma_callback(&controller->controller);

	if (result->result == DMA_TRANS_ABORTED)
		return;

	spin_lock_irqsave(&musb->lock, flags);

	dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
			&txstate);
	transferred = cppi41_channel->prog_len - txstate.residue;
	cppi41_channel->transferred += transferred;

	trace_musb_cppi41_gb(cppi41_channel);
	update_rx_toggle(cppi41_channel);

	if (cppi41_channel->transferred == cppi41_channel->total_len ||
			transferred < cppi41_channel->packet_sz)
		cppi41_channel->prog_len = 0;

	if (cppi41_channel->is_tx) {
		u8 type;

		if (is_host_active(musb))
			type = hw_ep->out_qh->type;
		else
			type = hw_ep->ep_in.type;

		if (type == USB_ENDPOINT_XFER_ISOC)
			/*
			 * Don't use the early-TX-interrupt workaround below
			 * for isochronous transfers. Since they are periodic,
			 * by the time the next transfer is scheduled, the
			 * current one should be done already.
			 *
			 * This avoids audio playback underruns.
			 */
			empty = true;
		else
			empty = musb_is_tx_fifo_empty(hw_ep);
	}

	if (!cppi41_channel->is_tx || empty) {
		cppi41_trans_done(cppi41_channel);
		goto out;
	}

	/*
	 * On AM335x it has been observed that the TX interrupt fires
	 * too early, that is, the TX FIFO is not yet empty but the DMA
	 * engine says that it is done with the transfer. We don't
	 * receive a FIFO-empty interrupt, so the only thing we can do is
	 * to poll for the bit. On HS it usually takes 2us, on FS around
	 * 110us - 150us depending on the transfer size.
	 * We spin on HS (no longer than 25us) and set up a timer on
	 * FS to check for the bit and complete the transfer.
	 */
	if (is_host_active(musb)) {
		if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
			is_hs = 1;
	} else {
		if (musb->g.speed == USB_SPEED_HIGH)
			is_hs = 1;
	}
	if (is_hs) {
		unsigned wait = 25;

		do {
			empty = musb_is_tx_fifo_empty(hw_ep);
			if (empty) {
				cppi41_trans_done(cppi41_channel);
				goto out;
			}
			wait--;
			if (!wait)
				break;
			cpu_relax();
		} while (1);
	}
	list_add_tail(&cppi41_channel->tx_check,
			&controller->early_tx_list);
	if (!hrtimer_is_queued(&controller->early_tx)) {
		unsigned long usecs = cppi41_channel->total_len / 10;

		hrtimer_start_range_ns(&controller->early_tx,
				       usecs * NSEC_PER_USEC,
				       20 * NSEC_PER_USEC,
				       HRTIMER_MODE_REL);
	}
out:
	spin_unlock_irqrestore(&musb->lock, flags);
}
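/*
 * The TX/RX mode and autoreq registers hold a 2-bit field per endpoint;
 * compute the new register value with endpoint "ep" set to "mode".
 */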
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
	unsigned shift;

	shift = (ep - 1) * 2;
	old &= ~(3 << shift);
	old |= mode << shift;
	return old;
}
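/*
 * The mode registers are cached in the controller so unchanged values
 * don't trigger redundant register writes.
 */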
static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	u32 port;
	u32 new_mode;
	u32 old_mode;

	if (cppi41_channel->is_tx)
		old_mode = controller->tx_mode;
	else
		old_mode = controller->rx_mode;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	if (cppi41_channel->is_tx) {
		controller->tx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
	} else {
		controller->rx_mode = new_mode;
		musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
	}
}
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
		unsigned mode)
{
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	u32 port, new_mode, old_mode;

	old_mode = controller->auto_req;
	port = cppi41_channel->port_num;
	new_mode = update_ep_mode(port, mode, old_mode);

	if (new_mode == old_mode)
		return;
	controller->auto_req = new_mode;
	musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
		    new_mode);
}
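/*
 * Program one transfer: TX may use generic RNDIS mode for transfers
 * longer than one packet, while RX is limited to one packet at a time
 * in transparent mode (Advisory 1.0.13). Returns false if no dmaengine
 * descriptor could be obtained.
 */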
static bool cppi41_configure_channel(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct dma_chan *dc = cppi41_channel->dc;
	struct dma_async_tx_descriptor *dma_desc;
	enum dma_transfer_direction direction;
	struct musb *musb = cppi41_channel->controller->controller.musb;
	unsigned use_gen_rndis = 0;

	cppi41_channel->buf_addr = dma_addr;
	cppi41_channel->total_len = len;
	cppi41_channel->transferred = 0;
	cppi41_channel->packet_sz = packet_sz;
	cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

	/*
	 * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
	 * than max packet size at a time.
	 */
	if (cppi41_channel->is_tx)
		use_gen_rndis = 1;

	if (use_gen_rndis) {
		/* RNDIS mode */
		if (len > packet_sz) {
			musb_writel(musb->ctrl_base,
				RNDIS_REG(cppi41_channel->port_num), len);
			/* gen rndis */
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_GEN_RNDIS);

			/* auto req */
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_ALL_NEOP);
		} else {
			musb_writel(musb->ctrl_base,
					RNDIS_REG(cppi41_channel->port_num), 0);
			cppi41_set_dma_mode(cppi41_channel,
					EP_MODE_DMA_TRANSPARENT);
			cppi41_set_autoreq_mode(cppi41_channel,
					EP_MODE_AUTOREQ_NONE);
		}
	} else {
		/* fallback mode */
		cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
		len = min_t(u32, packet_sz, len);
	}
	cppi41_channel->prog_len = len;
	direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc)
		return false;

	dma_desc->callback_result = cppi41_dma_callback;
	dma_desc->callback_param = channel;
	cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
	cppi41_channel->channel.rx_packet_done = false;

	trace_musb_cppi41_config(cppi41_channel);

	save_rx_toggle(cppi41_channel);
	dma_async_issue_pending(dc);
	return true;
}
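/*
 * musb dma_controller hooks: the dmaengine channels are requested once
 * at start() time; allocate/release only hand out and return the slot
 * matching the hardware endpoint.
 */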
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u8 is_tx)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);
	struct cppi41_dma_channel *cppi41_channel = NULL;
	u8 ch_num = hw_ep->epnum - 1;

	if (ch_num >= MUSB_DMA_NUM_CHANNELS)
		return NULL;

	if (is_tx)
		cppi41_channel = &controller->tx_channel[ch_num];
	else
		cppi41_channel = &controller->rx_channel[ch_num];

	if (!cppi41_channel->dc)
		return NULL;

	if (cppi41_channel->is_allocated)
		return NULL;

	cppi41_channel->hw_ep = hw_ep;
	cppi41_channel->is_allocated = 1;

	trace_musb_cppi41_alloc(cppi41_channel);
	return &cppi41_channel->channel;
}
static void cppi41_dma_channel_release(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;

	trace_musb_cppi41_free(cppi41_channel);
	if (cppi41_channel->is_allocated) {
		cppi41_channel->is_allocated = 0;
		channel->status = MUSB_DMA_STATUS_FREE;
		channel->actual_len = 0;
	}
}
static int cppi41_dma_channel_program(struct dma_channel *channel,
				u16 packet_sz, u8 mode,
				dma_addr_t dma_addr, u32 len)
{
	int ret;
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	int hb_mult = 0;

	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
		channel->status == MUSB_DMA_STATUS_BUSY);

	if (is_host_active(cppi41_channel->controller->controller.musb)) {
		if (cppi41_channel->is_tx)
			hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
		else
			hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
	}

	channel->status = MUSB_DMA_STATUS_BUSY;
	channel->actual_len = 0;

	if (hb_mult)
		packet_sz = hb_mult * (packet_sz & 0x7FF);

	ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
	if (!ret)
		channel->status = MUSB_DMA_STATUS_FREE;

	return ret;
}
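/*
 * Device-mode capability check: only bulk TX endpoints may use DMA
 * here; host mode is not expected to ask (hence the WARN_ON).
 */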
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
		void *buf, u32 length)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;

	if (is_host_active(musb)) {
		WARN_ON(1);
		return 1;
	}
	if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
		return 0;
	if (cppi41_channel->is_tx)
		return 1;
	/* AM335x Advisory 1.0.13. No workaround for device RX mode */
	return 0;
}
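/*
 * Tear down a running channel: disable DMA on the endpoint, flush the
 * FIFOs where needed, then repeat the USB_TDOWN write and
 * dmaengine_terminate_all() until the latter stops returning -EAGAIN.
 */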
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->controller.musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	trace_musb_cppi41_abort(cppi41_channel);

	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

		/* delay to drain the cppi dma pipeline for isoch */
		udelay(250);

		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* wait to drain the cppi dma pipeline */
		udelay(50);

		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
	if (musb->io.quirks & MUSB_DA8XX)
		mdelay(250);

	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	do {
		if (is_tx)
			musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	if (is_tx) {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}
static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
	struct dma_chan *dc;
	int i;

	for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
		dc = ctrl->tx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
		dc = ctrl->rx_channel[i].dc;
		if (dc)
			dma_release_channel(dc);
	}
}
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}
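/*
 * Parse the "dma-names" strings (txN/rxN) from the glue layer's
 * device-tree node and request the matching dmaengine channels. A
 * channel that cannot be requested defers the probe.
 */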
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
	struct musb *musb = controller->controller.musb;
	struct device *dev = musb->controller;
	struct device_node *np = dev->parent->of_node;
	struct cppi41_dma_channel *cppi41_channel;
	int count;
	int i;
	int ret;

	count = of_property_count_strings(np, "dma-names");
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		struct dma_chan *dc;
		struct dma_channel *musb_dma;
		const char *str;
		unsigned is_tx;
		unsigned int port;

		ret = of_property_read_string_index(np, "dma-names", i, &str);
		if (ret)
			goto err;
		if (strstarts(str, "tx"))
			is_tx = 1;
		else if (strstarts(str, "rx"))
			is_tx = 0;
		else {
			dev_err(dev, "Wrong dmatype %s\n", str);
			goto err;
		}
		ret = kstrtouint(str + 2, 0, &port);
		if (ret)
			goto err;

		ret = -EINVAL;
		if (port > MUSB_DMA_NUM_CHANNELS || !port)
			goto err;
		if (is_tx)
			cppi41_channel = &controller->tx_channel[port - 1];
		else
			cppi41_channel = &controller->rx_channel[port - 1];

		cppi41_channel->controller = controller;
		cppi41_channel->port_num = port;
		cppi41_channel->is_tx = is_tx;
		INIT_LIST_HEAD(&cppi41_channel->tx_check);

		musb_dma = &cppi41_channel->channel;
		musb_dma->private_data = cppi41_channel;
		musb_dma->status = MUSB_DMA_STATUS_FREE;
		musb_dma->max_len = SZ_4M;

		dc = dma_request_slave_channel(dev->parent, str);
		if (!dc) {
			dev_err(dev, "Failed to request %s.\n", str);
			ret = -EPROBE_DEFER;
			goto err;
		}
		cppi41_channel->dc = dc;
	}
	return 0;
err:
	cppi41_release_all_dma_chans(controller);
	return ret;
}
void cppi41_dma_controller_destroy(struct dma_controller *c)
{
	struct cppi41_dma_controller *controller = container_of(c,
			struct cppi41_dma_controller, controller);

	hrtimer_cancel(&controller->early_tx);
	cppi41_dma_controller_stop(controller);
	kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
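/*
 * Entry point used when a glue layer selects CPPI 4.1 DMA. May return
 * ERR_PTR(-EPROBE_DEFER) when the DMA engine itself has not been
 * probed yet.
 */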
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
	struct cppi41_dma_controller *controller;
	int ret = 0;

	if (!musb->controller->parent->of_node) {
		dev_err(musb->controller, "Need DT for the DMA engine.\n");
		return NULL;
	}

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		goto kzalloc_fail;

	hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	controller->early_tx.function = cppi41_recheck_tx_req;
	INIT_LIST_HEAD(&controller->early_tx_list);

	controller->controller.channel_alloc = cppi41_dma_channel_allocate;
	controller->controller.channel_release = cppi41_dma_channel_release;
	controller->controller.channel_program = cppi41_dma_channel_program;
	controller->controller.channel_abort = cppi41_dma_channel_abort;
	controller->controller.is_compatible = cppi41_is_compatible;
	controller->controller.musb = musb;

	ret = cppi41_dma_controller_start(controller);
	if (ret)
		goto plat_get_fail;
	return &controller->controller;

plat_get_fail:
	kfree(controller);
kzalloc_fail:
	if (ret == -EPROBE_DEFER)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);