drivers/net/wireless/mediatek/mt7601u/dma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

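/* Validate a frame buffer and return its 802.11 header length,
 * or 0 if the buffer is too short to contain the full header.
 */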
static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}

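/* Build an skb for one RX segment. Short frames are copied into the skb
 * in full; longer frames get only the 802.11 header (plus a few bytes)
 * copied and the rest of the payload attached as a page fragment.
 */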
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb always has enough space. */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}

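/* Parse one RX segment: strip the DMA header and RXWI, read the FCE info
 * word from the end, build an skb and pass it to mac80211, accumulating
 * the resulting frames on the caller's list for batched delivery.
 */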
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p,
                                   struct list_head *list)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* The DMA_INFO field at the beginning of the segment contains only
         * some of the information; the FCE descriptor has to be read from
         * the end of the segment.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        local_bh_disable();
        rcu_read_lock();

        ieee80211_rx_list(dev->hw, NULL, skb, list);

        rcu_read_unlock();
        local_bh_enable();
}

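/* Return the total length of the next RX segment, including the DMA
 * headers, or 0 if the remaining data cannot hold a valid segment.
 */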
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}

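/* Process every segment aggregated in a completed RX URB and deliver the
 * resulting frames in one batch. For larger transfers a replacement page
 * is allocated so the payload can be handed up as page fragments; small
 * transfers are simply copied.
 */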
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        LIST_HEAD(list);
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy the data if there is very little of it in the buffer;
         * otherwise switch to paged RX and allocate a replacement page.
         */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len,
                                       new_p ? e->p : NULL, &list);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        netif_receive_skb_list(&list);

        if (new_p) {
                /* We have one extra ref from the allocator. */
                __free_pages(e->p, MT_RX_ORDER);

                e->p = new_p;
        }
}

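/* Pop the oldest completed RX buffer off the queue, or return NULL if
 * nothing is pending. Protected by dev->rx_lock.
 */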
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}

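/* RX URB completion handler: mark the buffer as pending and kick the
 * RX tasklet, unless the URB was unlinked or the device went away.
 */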
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* Do not schedule the rx tasklet if the urb has been unlinked
         * or the device has been removed.
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
        case -EPROTO:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

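/* RX bottom half: drain the pending entries, processing and resubmitting
 * those whose URB completed successfully.
 */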
static void mt7601u_rx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet);
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}

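/* TX URB completion handler: hand the skb over to the TX status tasklet
 * and wake the mac80211 queue once enough entries have drained.
 */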
static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
        case -EPROTO:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}

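/* TX bottom half: report TX status for completed skbs and schedule the
 * delayed statistics read.
 */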
static void mt7601u_tx_tasklet(struct tasklet_struct *t)
{
        struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet);
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}

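/* Queue an skb on the bulk OUT endpoint for the given hardware queue and
 * stop the corresponding mac80211 queue when the ring fills up.
 */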
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it will
                 * often be the first ENODEV we see after device is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}

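/* Wrap the skb with the device's DMA packet descriptor and submit it on
 * the USB endpoint that matches its hardware queue.
 */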
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}

static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}

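/* (Re)submit a single RX buffer on the bulk IN packet endpoint. */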
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}

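/* Set up the TX/RX tasklets, allocate both queues and kick off the
 * initial batch of RX URBs.
 */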
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
        tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}