// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({ \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	u32 _val; \
	if ((_q)->flags & MT_QFLAG_WED) \
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
					       ((_q)->wed_regs + _offset)); \
	else \
		_val = readl(&(_q)->regs->_field); \
	_val; \
})

#define Q_WRITE(_dev, _q, _field, _val) do { \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	if ((_q)->flags & MT_QFLAG_WED) \
		mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
					 ((_q)->wed_regs + _offset), \
					 _val); \
	else \
		writel(_val, &(_q)->regs->_field); \
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

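/*
 * txwi/rxwi cache: TX descriptor (txwi) buffers and RX token entries are
 * kept on free lists in struct mt76_dev so hot-path allocations do not have
 * to hit the allocator. mt76_alloc_txwi() below carves a DMA-mapped txwi out
 * of a single kzalloc'd chunk, with the mt76_txwi_cache bookkeeping struct
 * placed right behind the descriptor area.
 */
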
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

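/*
 * Teardown helpers: drain whatever is still parked on the txwi/rxwi free
 * lists. txwi entries are DMA-unmapped and freed together with their backing
 * allocation; rxwi entries only carry a page fragment pointer, which is
 * released with skb_free_frag().
 */
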
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			skb_free_frag(t->ptr);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

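/*
 * Descriptor layout used by mt76_dma_add_buf(): each struct mt76_desc
 * carries two buffer pointers (buf0/buf1), a ctrl word holding the two
 * segment lengths plus the LAST_SEC flags, and an info word supplied by the
 * caller. For WED RX queues, buf1 additionally carries the rx token that
 * lets the buffer be matched back to its mt76_txwi_cache entry on
 * completion.
 */
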
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl;

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			struct mt76_txwi_cache *t = txwi;
			int rx_token;

			if (!t)
				return -ENOMEM;

			rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
							 buf[0].addr);
			buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
			       MT_DMA_CTL_TO_HOST;
		} else {
			if (txwi) {
				q->entry[q->head].txwi = DMA_DUMMY_DATA;
				q->entry[q->head].skip_buf0 = true;
			}

			if (buf[0].skip_unmap)
				entry->skip_buf0 = true;
			entry->skip_buf1 = i == nbufs - 1;

			entry->dma_addr[0] = buf[0].addr;
			entry->dma_len[0] = buf[0].len;

			ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
			if (i < nbufs - 1) {
				entry->dma_addr[1] = buf[1].addr;
				entry->dma_len[1] = buf[1].len;
				buf1 = buf[1].addr;
				ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
				if (buf[1].skip_unmap)
					entry->skip_buf1 = true;
			}

			if (i == nbufs - 1)
				ctrl |= MT_DMA_CTL_LAST_SEC0;
			else if (i == nbufs - 2)
				ctrl |= MT_DMA_CTL_LAST_SEC1;
		}

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

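/*
 * Completion side of the TX ring: mt76_dma_tx_cleanup_idx() undoes the DMA
 * mappings set up in mt76_dma_add_buf() for one descriptor and hands a
 * snapshot of the queue entry back to the caller before clearing it.
 */
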
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

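/*
 * RX completion: mt76_dma_get_buf() detaches the buffer the hardware has
 * filled at @idx. On WED RX queues the buffer is looked up via the rx token
 * stored in buf1 and its mt76_txwi_cache entry is returned to the rxwi
 * cache; on normal queues the address recorded in the queue entry is simply
 * unmapped.
 */
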
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
				      le32_to_cpu(desc->buf1));
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_unmap_single(dev->dma_dev, t->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));
		}
	} else {
		buf = e->buf;
		e->buf = NULL;

		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
	}

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

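/*
 * Main TX path: mt76_dma_tx_queue_skb() reserves a txwi, maps the skb head
 * and all fragments, lets the driver build the hardware descriptor via
 * drv->tx_prepare_skb() and finally queues the buffer list with
 * mt76_dma_add_buf(). All mappings are rolled back on error.
 */
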
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}

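/*
 * RX buffers are carved out of a page_frag_cache. For WED RX queues the
 * cache owned by the WED device is used instead of the per-queue one, so
 * that buffers handed to the WED hardware come from its pool.
 */
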
static struct page_frag_cache *
mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = &q->rx_page;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
		rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
#endif

	return rx_page;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0, offset = q->buf_offset;
	dma_addr_t addr;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_txwi_cache *t = NULL;
		struct mt76_queue_buf qbuf;
		void *buf = NULL;

		if ((q->flags & MT_QFLAG_WED) &&
		    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
			t = mt76_get_rxwi(dev);
			if (!t)
				break;
		}

		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

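/*
 * mt76_dma_wed_setup() hands a queue over to the WED (Wireless Ethernet
 * Dispatch) engine when one is active: TX, TXFREE and RX rings are
 * registered with the WED device and q->wed_regs is pointed at the
 * WED-owned register copy that Q_READ()/Q_WRITE() will use from then on.
 */
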
static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}

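/*
 * Ring allocation: descriptors come from coherent DMA memory and the
 * software entry array from normal kernel memory; both are managed
 * allocations (dmam_/devm_) that are released automatically when the device
 * goes away.
 */
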
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

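/*
 * mt76_dma_rx_cleanup() drains every descriptor still owned by software via
 * a flush dequeue, frees the page fragments and finally drops the reference
 * held by the per-queue page_frag_cache.
 */
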
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

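/*
 * Fragmented frames: when a frame spans several RX buffers, the head skb is
 * parked in q->rx_head and each further buffer is attached as a page
 * fragment until a descriptor with LAST_SEC0 set ends the chain; only then
 * is the complete skb passed to drv->rx_skb().
 */
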
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);

	return done;
}

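/*
 * NAPI glue: mt76_dma_rx_poll() maps the napi instance back to its RX queue
 * index and keeps calling mt76_dma_rx_process() until either the budget is
 * exhausted or the ring runs dry, at which point the interrupt is re-armed
 * through drv->rx_poll_complete().
 */
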
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

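/*
 * Illustrative use only (not taken from this file): a memory-mapped bus
 * back-end is expected to install the DMA queue ops once at probe time and
 * then go through the mt76_queue_ops callbacks, e.g.:
 *
 *	mt76_dma_attach(&dev->mt76);
 *	err = dev->mt76.queue_ops->init(&dev->mt76, mt76_dma_rx_poll);
 *
 * The exact call sites and error handling live in the individual chip
 * drivers.
 */
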
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
			mt76_dma_rx_cleanup(dev, q);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);