// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

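/* All host register accesses go through vendor control transfers on
 * endpoint 0: usb->data is used as a DMA-safe bounce buffer and
 * usb_ctrl_mtx serializes every user of it.
 */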
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

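/* Read a 32-bit register: the high 16 bits of the address go in the
 * request's value field, the low 16 bits in the offset field, and the
 * payload is returned through the usb->data bounce buffer.
 */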
static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     addr >> 16, addr, usb->data,
				     sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
	}
	return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
			u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       addr >> 16, addr, usb->data,
			       sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
	}
	___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
			 u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
	___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure that a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
			   const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int ret, i = 0, batch_len;
	const u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, batch_len);
		ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

static void
mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
		    void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

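/* Register-pair batches: once the MCU is up and running these are
 * routed through the MCU ops instead of raw vendor requests.
 */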
static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);

	return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);

	return mt76u_req_rd_rp(dev, base, data, n);
}

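/* Scatter-gather is used only when the bus advertises SG support
 * (sg_tablesize > 0) and either imposes no SG constraint or runs a
 * Wireless USB link; disable_usb_sg turns it off unconditionally.
 */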
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

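/* Allocate an urb with the scatterlist array appended right after the
 * urb structure itself, so a single allocation covers both.
 */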
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

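/* Every RX buffer starts with a 16-bit little-endian DMA length field;
 * drivers that set MT_DRV_RX_DMA_HDR consume that header themselves.
 */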
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

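/* Build an skb around the first RX fragment: build_skb() is the fast
 * path when payload plus skb_shared_info fit in the fragment, otherwise
 * the head is copied into a freshly allocated skb and the remainder is
 * attached as a page fragment.
 */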
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	/* fast path */
	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);
	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
}

static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_urb_free(q->entry[i].urb);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}

	tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

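/* TX completion path: reap entries marked done by the urb completion
 * handler, wake queues that were stopped once enough room is available
 * again and kick the per-AC scheduler.
 */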
static void mt76u_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->q_tx[i];

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}

		wake = q->stopped && q->queued < q->ndesc - 8;
		if (wake)
			q->stopped = false;

		if (!q->queued)
			wake_up(&dev->tx_wait);

		mt76_txq_schedule(&dev->phy, i);

		if (dev->drv->tx_status_data &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
			queue_work(dev->wq, &dev->usb.stat_work);
		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->tx_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

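/* Enqueue an skb on a TX ring: the driver callback prepares its
 * hardware descriptor, the payload is attached to the ring entry's
 * urb, and the urb is submitted later from mt76u_tx_kick().
 */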
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->queued++;

	return idx;
}

static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->q_tx[i] = dev->q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);
		dev->q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].urb);
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->tx_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		/* On device removal we might queue skb's, but mt76u_tx_kick()
		 * will fail to submit urb, clean up those skb's manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->tx_worker);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf, bool ext)
{
	static struct mt76_bus_ops mt76u_ops = {
		.read_copy = mt76u_read_copy_ext,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
	mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
	mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
	mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;

	dev->tx_worker.fn = mt76u_tx_worker;
	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data) {
		err = -ENOMEM;
		goto error;
	}

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		goto error;

	return 0;

error:
	destroy_workqueue(dev->wq);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");