/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"
#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300
/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
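/* Example (illustrative only): reading a chunk of EEPROM over the
 * control endpoint could look roughly like this, reusing the
 * MT_VEND_READ_EEPROM request id from the register helpers below:
 *
 *	u8 eep[16];
 *	int ret;
 *
 *	ret = mt76u_vendor_request(dev, MT_VEND_READ_EEPROM,
 *				   USB_DIR_IN | USB_TYPE_VENDOR,
 *				   0, 0, eep, sizeof(eep));
 *
 * A negative return is the last usb_control_msg() error after up to
 * MT_VEND_REQ_MAX_RETRY attempts.
 */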
/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);
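/* The 32-bit value is split into two control transfers because this
 * vendor request carries its payload in the 16-bit wValue field: the
 * low half-word goes to 'offset' and the high half-word to
 * 'offset + 2'.
 */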
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;

	return 0;
}
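/* Bulk endpoints are mapped in descriptor order: the first
 * __MT_EP_IN_MAX bulk-IN endpoints become the rx endpoints and the
 * first __MT_EP_OUT_MAX bulk-OUT endpoints the tx ones, so probing
 * fails with -EINVAL unless the interface exposes the full set.
 */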
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = netdev_alloc_frag(len);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}
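/* If netdev_alloc_frag() fails partway, the entries beyond the
 * requested count that are left over from a previous fill are freed and
 * the list is shrunk to the i fragments that were refilled; returning
 * 'i ?: -ENOMEM' keeps a shorter but still usable scatterlist when at
 * least one fragment was obtained.
 */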
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);
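/* Note that usb_fill_bulk_urb() is given a NULL transfer buffer on
 * purpose: mt76u_buf_alloc() populated urb->sg/urb->num_sgs, and the
 * USB core transfers straight from the scatterlist when those are set,
 * so no linear bounce buffer is required.
 */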
static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}
int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
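/* A driver normally reaches this through mt76u_alloc_queues(), whose rx
 * setup ends by calling it; afterwards the completion handler and rx
 * tasklet keep the ring populated, so it only needs to be called again
 * when restarting the rx path after mt76u_stop_queues().
 */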
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}
	q->ndesc = MT_NUM_RX_ENTRIES;

	return mt76u_submit_rx_buffers(dev);
}
static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);
}
static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}
int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	struct sk_buff *iter, *last = skb;
	u32 info, pad;

	/* Buffer layout:
	 *	|   4B   | xfer len |      pad       |  4B  |
	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

	pad = round_up(skb->len, 4) + 4 - skb->len;
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (unlikely(pad)) {
		if (__skb_pad(last, pad, true))
			return -ENOMEM;
		__skb_put(last, pad);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
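/* Worked example of the layout above: for a 61-byte payload the TXINFO
 * length field is round_up(61, 4) = 64; after the 4-byte TXINFO push
 * skb->len is 65, so pad = round_up(65, 4) + 4 - 65 = 7, i.e. 3 bytes
 * of alignment padding plus the mandatory trailing 4 zero bytes.
 */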
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}
static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}
static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}
static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}
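/* sg_init_marker() only places the end marker; the entries themselves
 * are filled by skb_to_sgvec_nomark(), which maps the skb head, its
 * page fragments and every frag-list skb counted above into urb->sg
 * without writing an end marker of its own.
 */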
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}
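/* The q->first..q->tail window tracks urbs queued by
 * mt76u_tx_queue_skb() but not yet handed to the USB core; the kick
 * handler submits them in order and stops on the first error, leaving
 * the unsubmitted entries between q->first and q->tail (and marking the
 * device removed on -ENODEV).
 */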
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = q2hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}
static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}
static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}
void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);
void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);
int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		goto err;

	err = mt76u_alloc_tx(dev);
	if (err < 0)
		goto err;

	return 0;
err:
	mt76u_queues_deinit(dev);
	return err;
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);
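/* Usage sketch (illustrative, not taken from a specific chipset driver):
 * a driver would typically call mt76u_init() from its USB probe callback
 * and allocate the rings afterwards, e.g.:
 *
 *	static int xxx_probe(struct usb_interface *intf,
 *			     const struct usb_device_id *id)
 *	{
 *		struct mt76_dev *mdev = ...; // driver-specific allocation
 *		int err;
 *
 *		usb_set_intfdata(intf, mdev);
 *		err = mt76u_init(mdev, intf);
 *		if (err < 0)
 *			return err;
 *		return mt76u_alloc_queues(mdev);
 *	}
 *
 * xxx_probe and the allocation step are hypothetical placeholders; a
 * real driver also sets up dev->drv and its register map before using
 * the bus helpers.
 */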
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");