// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

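/*
 * Allocate one hardware DMA ring: a coherent descriptor array plus a
 * software entry array, then point the ring registers at it and reset
 * the CPU/DMA indices.
 */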
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}

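/*
 * Write one or more buffers into consecutive descriptors. Buffers are
 * consumed in pairs, since each descriptor carries two segments
 * (buf0/buf1); the last entry is tagged with the txwi/skb so it can be
 * reaped later.
 */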
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
		}
		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;
		desc = &q->desc[idx];

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

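/*
 * Unmap the buffers of a single completed tx descriptor and hand the
 * entry back to the caller. Dummy txwi/skb markers are cleared so the
 * caller only sees real pointers.
 */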
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	__le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
	u32 ctrl = le32_to_cpu(__ctrl);

	if (!e->skip_buf0) {
		__le32 addr = READ_ONCE(q->desc[idx].buf0);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
		__le32 addr = READ_ONCE(q->desc[idx].buf1);
		u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);

		dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
				 DMA_TO_DEVICE);
	}

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;
	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

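/*
 * Re-program the ring base/size and resync the software head/tail with
 * the index the hardware last reported; used after a flush or reset.
 */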
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
	writel(q->head, &q->regs->cpu_idx);
}

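/*
 * Reap tx descriptors that the hardware has finished with, up to the
 * current dma_idx (or everything when flushing), complete their skbs,
 * recycle the txwi buffers and update the per-queue counters.
 */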
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_sw_queue *sq = &dev->q_tx[qid];
	struct mt76_queue *q = sq->q;
	struct mt76_queue_entry entry;
	unsigned int n_swq_queued[8] = {};
	unsigned int n_queued = 0;
	bool wake = false;
	int i, last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while ((q->queued > n_queued) && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		if (entry.schedule)
			n_swq_queued[entry.qid]++;

		q->tail = (q->tail + 1) % q->ndesc;
		n_queued++;

		if (entry.skb)
			dev->drv->tx_complete_skb(dev, qid, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	spin_lock_bh(&q->lock);

	q->queued -= n_queued;
	for (i = 0; i < 4; i++) {
		if (!n_swq_queued[i])
			continue;

		dev->q_tx[i].swq_queued -= n_swq_queued[i];
	}

	/* slots 4..7 of n_swq_queued[] account for the MCU tx queues */
	for (i = 0; i < 4; i++) {
		if (!n_swq_queued[4 + i])
			continue;

		dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
	}

	if (flush)
		mt76_dma_sync_idx(dev, q);

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	spin_unlock_bh(&q->lock);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

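/*
 * Detach the rx buffer attached to a descriptor: unmap it, report its
 * length/info to the caller and clear the software entry.
 */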
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

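/*
 * Pop the next completed descriptor off the tail of a ring; unless
 * flushing, stop at the first descriptor the hardware has not marked
 * as done yet.
 */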
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;
	if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	/* publish the new head index to the hardware */
	writel(q->head, &q->regs->cpu_idx);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_queue_buf buf;
	dma_addr_t addr;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		return -ENOMEM;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;
}

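/*
 * Map a frame for tx: the first buffer carries the txwi, followed by
 * the skb head and any paged fragments. The driver callback fills in
 * the txwi before the buffers are queued on the hardware ring.
 */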
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid].q;
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_queue_entry e;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
	e.skb = tx_info.skb;
	e.txwi = t;
	dev->drv->tx_complete_skb(dev, qid, &e);
	mt76_put_txwi(dev, t);
	return ret;
}

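/*
 * Refill an rx ring with page fragments until it is full, then kick the
 * hardware. Returns the number of buffers added.
 */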
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

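/*
 * Free every buffer still sitting on an rx ring and drop the page
 * fragment cache that backs it.
 */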
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

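/*
 * Reset an rx ring after a hardware restart: clear the done bits, free
 * and re-allocate all buffers and drop any partially assembled frame.
 */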
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

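/*
 * Append an rx buffer to the frame currently being reassembled in
 * q->rx_head; once the last fragment arrives, hand the skb to the
 * driver rx path.
 */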
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct page *page = virt_to_head_page(data);
	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;

	offset += q->buf_offset;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
			q->buf_size);

	if (more)
		return;

	q->rx_head = NULL;
	dev->drv->rx_skb(dev, q - dev->q_rx, skb);
}

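/*
 * Main rx loop: pull completed buffers off the ring, turn them into
 * skbs (or attach them as fragments of a frame spanning multiple
 * buffers) and pass them to the driver, then refill the ring.
 */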
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;

			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

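/*
 * NAPI poll handler: each rx queue has its own NAPI context hanging off
 * the dummy netdev, so the queue index is recovered from the napi
 * pointer itself.
 */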
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}

static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);