// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

#define Q_READ(_dev, _q, _field) ({ \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	u32 _val; \
	if ((_q)->flags & MT_QFLAG_WED) \
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed, \
					       ((_q)->wed_regs + \
						_offset)); \
	else \
		_val = readl(&(_q)->regs->_field); \
	_val; \
})

#define Q_WRITE(_dev, _q, _field, _val) do { \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	if ((_q)->flags & MT_QFLAG_WED) \
		mtk_wed_device_reg_write(&(_dev)->mmio.wed, \
					 ((_q)->wed_regs + _offset), \
					 _val); \
	else \
		writel(_val, &(_q)->regs->_field); \
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

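/* TX descriptor words (txwi) are allocated together with their cache entry,
 * DMA-mapped once and then recycled through dev->txwi_cache instead of being
 * reallocated for every frame; rxwi entries play the same role for WED rx
 * buffers.
 */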
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

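/* Program one rx descriptor. For WED-managed rx queues the buffer is also
 * registered in the rx token table so the offload hardware can reference it.
 */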
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

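/* Write tx descriptors for a buffer list; each hardware descriptor carries up
 * to two scatter-gather segments (SD_LEN0/SD_LEN1).
 */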
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

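/* Unmap the DMA buffers of a completed entry, hand a copy back to the caller
 * and clear the slot for reuse.
 */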
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

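/* Reclaim completed tx descriptors up to the hardware DMA index; with flush
 * set, drain the whole queue regardless of completion state.
 */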
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

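/* Fetch the buffer behind a completed rx descriptor. WED rx queues resolve it
 * through the rx token carried in buf1 rather than through the queue entry.
 */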
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 buf1 = le32_to_cpu(desc->buf1);
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));

			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

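/* Queue an skb without a txwi header; used for MCU command queues. */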
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

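/* Main data tx path: reserve a txwi, map the skb head and all fragments, let
 * the driver fill the txwi via tx_prepare_skb(), then push the buffer list to
 * the ring.
 */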
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &dev->phy.state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

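/* Refill an rx ring with page pool buffers until it is full or the pool runs
 * dry.
 */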
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		 bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		enum dma_data_direction dir;
		struct mt76_queue_buf qbuf;
		dma_addr_t addr;
		int offset;
		void *buf;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
		dir = page_pool_get_dma_dir(q->page_pool);
		dma_sync_single_for_device(dev->dma_dev, addr, len, dir);

		qbuf.addr = addr + q->buf_offset;
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

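/* Attach a queue to the WED (Wireless Ethernet Dispatch) block so tx, txfree
 * or rx ring handling can be offloaded to it.
 */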
int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags;

	if (!q || !q->ndesc)
		return -EINVAL;

	flags = q->flags;
	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q, false);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_dma_wed_setup(dev, q, false);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_dma_wed_setup(dev, q, true);
	if (q->flags != MT_WED_Q_TXFREE) {
		mt76_dma_sync_idx(dev, q);
		mt76_dma_rx_fill(dev, q, false);
	}
}

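/* Append an rx buffer to the skb being assembled in q->rx_head; once the last
 * fragment arrives, pass the completed frame to the driver.
 */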
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, true);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

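/* NAPI rx worker: dequeue completed buffers, build skbs (or attach fragments),
 * hand them to the driver and refill the ring.
 */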
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, true);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);