// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

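/* Return the length of the 802.11 header at the start of @data, or 0 if
 * the buffer is too short to contain one.
 */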
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

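/* Build an skb for one RX segment. With paged RX only the 802.11 header
 * is copied into the skb head and the rest of the frame is attached as a
 * page fragment; otherwise the whole frame is copied.
 */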
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		/* Hardware inserted 2 bytes of padding after the header -
		 * copy the header separately and skip over the pad.
		 */
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb will always have enough
	 * space.
	 */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

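/* Layout of a single RX segment, as consumed below:
 *
 *  | MT_DMA_HDR_LEN | struct mt7601u_rxwi | 802.11 frame | MT_FCE_INFO_LEN |
 */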
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some
	 * of the information; we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

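/* Return the total length (DMA headers included) of the next segment in an
 * aggregate buffer, or 0 when no valid segment remains.
 */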
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

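/* A single RX URB may carry multiple DMA segments (RX aggregation). Walk
 * them one by one; when using paged RX, hand the filled page off to the
 * network stack and replace it with a freshly allocated one.
 */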
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		put_page(e->p);
		e->p = new_p;
	}
}

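/* Consumer side of the RX ring - pop the oldest completed entry, or NULL
 * if nothing is pending.
 */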
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	/* do not schedule rx tasklet if urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

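/* Process all completed RX URBs, resubmitting each one as soon as its
 * data has been consumed.
 */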
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	/* Wake the mac80211 queue once 1/8 of the ring is free again. */
	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

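/* Deferred TX completion - report finished skbs to mac80211 and schedule
 * a delayed TX-stats read (at most one outstanding at a time).
 */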
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
	struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

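/* Place one frame on the per-endpoint TX ring and submit its URB. The skb
 * must already be wrapped in the DMA packet descriptor.
 */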
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

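/* Entry point from the TX path: pick the endpoint for the hardware queue,
 * wrap the skb in the DMA packet descriptor and submit it. On submit
 * failure the skb is freed here.
 */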
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

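/* Allocate the TX and RX rings and start RX; any failure tears everything
 * back down through mt7601u_dma_cleanup().
 */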
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret;

	tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
	tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

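/* Teardown order matters: poison the RX URBs first so a still-running
 * tasklet cannot resubmit them, then kill the tasklets before freeing.
 */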
void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}