Merge tag 'audit-pr-20191126' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoor...
[linux-2.6-microblaze.git] / drivers / staging / wfx / queue.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * O(1) TX queue with built-in allocator.
4  *
5  * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
6  * Copyright (c) 2010, ST-Ericsson
7  */
8 #include <linux/sched.h>
9 #include <net/mac80211.h>
10
11 #include "queue.h"
12 #include "wfx.h"
13 #include "sta.h"
14 #include "data_tx.h"
15
/* Take a reference that inhibits TX processing; released by wfx_tx_unlock(). */
void wfx_tx_lock(struct wfx_dev *wdev)
{
	atomic_inc(&wdev->tx_lock);
}
20
21 void wfx_tx_unlock(struct wfx_dev *wdev)
22 {
23         int tx_lock = atomic_dec_return(&wdev->tx_lock);
24
25         WARN(tx_lock < 0, "inconsistent tx_lock value");
26         if (!tx_lock)
27                 wfx_bh_request_tx(wdev);
28 }
29
30 void wfx_tx_flush(struct wfx_dev *wdev)
31 {
32         int ret;
33
34         WARN(!atomic_read(&wdev->tx_lock), "tx_lock is not locked");
35
36         // Do not wait for any reply if chip is frozen
37         if (wdev->chip_frozen)
38                 return;
39
40         mutex_lock(&wdev->hif_cmd.lock);
41         ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
42                                  !wdev->hif.tx_buffers_used,
43                                  msecs_to_jiffies(3000));
44         if (!ret) {
45                 dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
46                          wdev->hif.tx_buffers_used);
47                 wfx_pending_dump_old_frames(wdev, 3000);
48                 // FIXME: drop pending frames here
49                 wdev->chip_frozen = 1;
50         }
51         mutex_unlock(&wdev->hif_cmd.lock);
52 }
53
/* Grab the TX lock, then wait for the device to finish in-flight frames. */
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}
59
60 void wfx_tx_queues_lock(struct wfx_dev *wdev)
61 {
62         int i;
63         struct wfx_queue *queue;
64
65         for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
66                 queue = &wdev->tx_queue[i];
67                 spin_lock_bh(&queue->queue.lock);
68                 if (queue->tx_locked_cnt++ == 0)
69                         ieee80211_stop_queue(wdev->hw, queue->queue_id);
70                 spin_unlock_bh(&queue->queue.lock);
71         }
72 }
73
74 void wfx_tx_queues_unlock(struct wfx_dev *wdev)
75 {
76         int i;
77         struct wfx_queue *queue;
78
79         for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
80                 queue = &wdev->tx_queue[i];
81                 spin_lock_bh(&queue->queue.lock);
82                 WARN(!queue->tx_locked_cnt, "queue already unlocked");
83                 if (--queue->tx_locked_cnt == 0)
84                         ieee80211_wake_queue(wdev->hw, queue->queue_id);
85                 spin_unlock_bh(&queue->queue.lock);
86         }
87 }
88
89 /* If successful, LOCKS the TX queue! */
90 void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
91 {
92         int i;
93         bool done;
94         struct wfx_queue *queue;
95         struct sk_buff *item;
96         struct wfx_dev *wdev = wvif->wdev;
97         struct hif_msg *hif;
98
99         if (wvif->wdev->chip_frozen) {
100                 wfx_tx_lock_flush(wdev);
101                 wfx_tx_queues_clear(wdev);
102                 return;
103         }
104
105         do {
106                 done = true;
107                 wfx_tx_lock_flush(wdev);
108                 for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
109                         queue = &wdev->tx_queue[i];
110                         spin_lock_bh(&queue->queue.lock);
111                         skb_queue_walk(&queue->queue, item) {
112                                 hif = (struct hif_msg *) item->data;
113                                 if (hif->interface == wvif->id)
114                                         done = false;
115                         }
116                         spin_unlock_bh(&queue->queue.lock);
117                 }
118                 if (!done) {
119                         wfx_tx_unlock(wdev);
120                         msleep(20);
121                 }
122         } while (!done);
123 }
124
125 static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
126                                struct sk_buff_head *gc_list)
127 {
128         int i;
129         struct sk_buff *item;
130         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
131
132         spin_lock_bh(&queue->queue.lock);
133         while ((item = __skb_dequeue(&queue->queue)) != NULL)
134                 skb_queue_head(gc_list, item);
135         spin_lock_bh(&stats->pending.lock);
136         for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
137                 stats->link_map_cache[i] -= queue->link_map_cache[i];
138                 queue->link_map_cache[i] = 0;
139         }
140         spin_unlock_bh(&stats->pending.lock);
141         spin_unlock_bh(&queue->queue.lock);
142 }
143
144 void wfx_tx_queues_clear(struct wfx_dev *wdev)
145 {
146         int i;
147         struct sk_buff *item;
148         struct sk_buff_head gc_list;
149         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
150
151         skb_queue_head_init(&gc_list);
152         for (i = 0; i < IEEE80211_NUM_ACS; ++i)
153                 wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
154         wake_up(&stats->wait_link_id_empty);
155         while ((item = skb_dequeue(&gc_list)) != NULL)
156                 wfx_skb_dtor(wdev, item);
157 }
158
159 void wfx_tx_queues_init(struct wfx_dev *wdev)
160 {
161         int i;
162
163         memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
164         memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
165         skb_queue_head_init(&wdev->tx_queue_stats.pending);
166         init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
167
168         for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
169                 wdev->tx_queue[i].queue_id = i;
170                 skb_queue_head_init(&wdev->tx_queue[i].queue);
171         }
172 }
173
/*
 * Tear down the TX queues. Frames already handed to the device (pending)
 * should be gone by now; queued-but-unsent frames are dropped.
 */
void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
	WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
	wfx_tx_queues_clear(wdev);
}
179
180 size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
181                                    u32 link_id_map)
182 {
183         size_t ret;
184         int i, bit;
185
186         if (!link_id_map)
187                 return 0;
188
189         spin_lock_bh(&queue->queue.lock);
190         if (link_id_map == (u32)-1) {
191                 ret = skb_queue_len(&queue->queue);
192         } else {
193                 ret = 0;
194                 for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
195                      ++i, bit <<= 1) {
196                         if (link_id_map & bit)
197                                 ret += queue->link_map_cache[i];
198                 }
199         }
200         spin_unlock_bh(&queue->queue.lock);
201         return ret;
202 }
203
204 void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
205                       struct sk_buff *skb)
206 {
207         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
208         struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
209
210         WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
211         spin_lock_bh(&queue->queue.lock);
212         __skb_queue_tail(&queue->queue, skb);
213
214         ++queue->link_map_cache[tx_priv->link_id];
215
216         spin_lock_bh(&stats->pending.lock);
217         ++stats->link_map_cache[tx_priv->link_id];
218         spin_unlock_bh(&stats->pending.lock);
219         spin_unlock_bh(&queue->queue.lock);
220 }
221
222 static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
223                                         struct wfx_queue *queue,
224                                         u32 link_id_map)
225 {
226         struct sk_buff *skb = NULL;
227         struct sk_buff *item;
228         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
229         struct wfx_tx_priv *tx_priv;
230         bool wakeup_stats = false;
231
232         spin_lock_bh(&queue->queue.lock);
233         skb_queue_walk(&queue->queue, item) {
234                 tx_priv = wfx_skb_tx_priv(item);
235                 if (link_id_map & BIT(tx_priv->link_id)) {
236                         skb = item;
237                         break;
238                 }
239         }
240         WARN_ON(!skb);
241         if (skb) {
242                 tx_priv = wfx_skb_tx_priv(skb);
243                 tx_priv->xmit_timestamp = ktime_get();
244                 __skb_unlink(skb, &queue->queue);
245                 --queue->link_map_cache[tx_priv->link_id];
246
247                 spin_lock_bh(&stats->pending.lock);
248                 __skb_queue_tail(&stats->pending, skb);
249                 if (!--stats->link_map_cache[tx_priv->link_id])
250                         wakeup_stats = true;
251                 spin_unlock_bh(&stats->pending.lock);
252         }
253         spin_unlock_bh(&queue->queue.lock);
254         if (wakeup_stats)
255                 wake_up(&stats->wait_link_id_empty);
256         return skb;
257 }
258
259 int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
260 {
261         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
262         struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
263         struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
264
265         WARN_ON(skb_get_queue_mapping(skb) > 3);
266         spin_lock_bh(&queue->queue.lock);
267         ++queue->link_map_cache[tx_priv->link_id];
268
269         spin_lock_bh(&stats->pending.lock);
270         ++stats->link_map_cache[tx_priv->link_id];
271         __skb_unlink(skb, &stats->pending);
272         spin_unlock_bh(&stats->pending.lock);
273         __skb_queue_tail(&queue->queue, skb);
274         spin_unlock_bh(&queue->queue.lock);
275         return 0;
276 }
277
278 int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
279 {
280         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
281
282         spin_lock_bh(&stats->pending.lock);
283         __skb_unlink(skb, &stats->pending);
284         spin_unlock_bh(&stats->pending.lock);
285         wfx_skb_dtor(wdev, skb);
286
287         return 0;
288 }
289
290 struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
291 {
292         struct sk_buff *skb;
293         struct hif_req_tx *req;
294         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
295
296         spin_lock_bh(&stats->pending.lock);
297         skb_queue_walk(&stats->pending, skb) {
298                 req = wfx_skb_txreq(skb);
299                 if (req->packet_id == packet_id) {
300                         spin_unlock_bh(&stats->pending.lock);
301                         return skb;
302                 }
303         }
304         spin_unlock_bh(&stats->pending.lock);
305         WARN(1, "cannot find packet in pending queue");
306         return NULL;
307 }
308
309 void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
310 {
311         struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
312         ktime_t now = ktime_get();
313         struct wfx_tx_priv *tx_priv;
314         struct hif_req_tx *req;
315         struct sk_buff *skb;
316         bool first = true;
317
318         spin_lock_bh(&stats->pending.lock);
319         skb_queue_walk(&stats->pending, skb) {
320                 tx_priv = wfx_skb_tx_priv(skb);
321                 req = wfx_skb_txreq(skb);
322                 if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
323                                                   limit_ms))) {
324                         if (first) {
325                                 dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
326                                          limit_ms);
327                                 first = false;
328                         }
329                         dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
330                                  req->packet_id,
331                                  ktime_ms_delta(now, tx_priv->xmit_timestamp));
332                 }
333         }
334         spin_unlock_bh(&stats->pending.lock);
335 }
336
337 unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
338                                           struct sk_buff *skb)
339 {
340         ktime_t now = ktime_get();
341         struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
342
343         return ktime_us_delta(now, tx_priv->xmit_timestamp);
344 }
345
346 bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
347 {
348         int i;
349         struct sk_buff_head *queue;
350         bool ret = true;
351
352         for (i = 0; i < IEEE80211_NUM_ACS; i++) {
353                 queue = &wdev->tx_queue[i].queue;
354                 spin_lock_bh(&queue->lock);
355                 if (!skb_queue_empty(queue))
356                         ret = false;
357                 spin_unlock_bh(&queue->lock);
358         }
359         return ret;
360 }
361
/*
 * Pre-transmit filter for one frame just pulled from a queue. Decides
 * whether the frame can go out as-is (do_tx), must be dropped (do_drop),
 * or first needs the device's default WEP key switched (do_wep).
 * Returns true when the frame was consumed here (dropped or deferred),
 * in which case the caller must not transmit it.
 */
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
                               struct wfx_queue *queue)
{
        bool handled = false;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
        struct hif_req_tx *req = wfx_skb_txreq(skb);
        /* The 802.11 header sits inside the HIF request, fc_offset bytes in */
        struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);

        enum {
                do_probe,
                do_drop,
                do_wep,
                do_tx,
        } action = do_tx;

        /* Drop frames whose vif is no longer in a state that can send them */
        switch (wvif->vif->type) {
        case NL80211_IFTYPE_STATION:
                if (wvif->state < WFX_STATE_PRE_STA)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_AP:
                if (!wvif->state) {
                        action = do_drop;
                } else if (!(BIT(tx_priv->raw_link_id) &
                             (BIT(0) | wvif->link_id_map))) {
                        /* link-id 0 is always accepted; any other id must
                         * still be present in the vif's link_id_map
                         */
                        dev_warn(wvif->wdev->dev, "a frame with expired link-id is dropped\n");
                        action = do_drop;
                }
                break;
        case NL80211_IFTYPE_ADHOC:
                if (wvif->state != WFX_STATE_IBSS)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_MONITOR:
        default:
                action = do_drop;
                break;
        }

        if (action == do_tx) {
                if (ieee80211_is_nullfunc(frame->frame_control)) {
                        /* During BSS-loss probing, remember this NULL-data
                         * frame's packet-id so its confirmation can be
                         * matched, and bump it to the voice queue
                         */
                        mutex_lock(&wvif->bss_loss_lock);
                        if (wvif->bss_loss_state) {
                                wvif->bss_loss_confirm_id = req->packet_id;
                                req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
                        }
                        mutex_unlock(&wvif->bss_loss_lock);
                } else if (ieee80211_has_protected(frame->frame_control) &&
                           tx_priv->hw_key &&
                           tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
                           (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
                            tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
                        /* WEP frame using a key other than the device's
                         * current default: the key index must be switched
                         * before this frame can be sent
                         */
                        action = do_wep;
                }
        }

        switch (action) {
        case do_drop:
                wfx_pending_remove(wvif->wdev, skb);
                handled = true;
                break;
        case do_wep:
                /* Keep TX locked until wep_key_work has reprogrammed the
                 * default key and re-sent this skb; if the work was
                 * already scheduled, drop the extra lock reference now
                 */
                wfx_tx_lock(wvif->wdev);
                wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
                wvif->wep_pending_skb = skb;
                if (!schedule_work(&wvif->wep_key_work))
                        wfx_tx_unlock(wvif->wdev);
                handled = true;
                break;
        case do_tx:
                break;
        default:
                /* Do nothing */
                break;
        }
        return handled;
}
439
/*
 * Pick the access category that should transmit next among the queues
 * holding frames for the links in @tx_allowed_mask. Queues compete with
 * a score derived from their EDCA parameters plus a random component
 * (lower score wins). @total accumulates the number of eligible frames
 * across all queues. Returns the winning queue index, or -1 if nothing
 * is queued for the allowed links.
 */
static int wfx_get_prio_queue(struct wfx_vif *wvif,
                                 u32 tx_allowed_mask, int *total)
{
        /* after-DTIM and U-APSD traffic must not be bypassed by a burst */
        static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
                BIT(WFX_LINK_ID_UAPSD);
        struct hif_req_edca_queue_params *edca;
        unsigned int score, best = -1; /* unsigned: -1 wraps to UINT_MAX */
        int winner = -1;
        int i;

        /* search for a winner using edca params */
        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                int queued;

                edca = &wvif->edca.params[i];
                queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
                                tx_allowed_mask);
                if (!queued)
                        continue;
                *total += queued;
                /* Lower AIFS/CWmin beats higher; the random term emulates
                 * contention within the [cw_min, cw_max] window
                 */
                score = ((edca->aifsn + edca->cw_min) << 16) +
                        ((edca->cw_max - edca->cw_min) *
                         (get_random_int() & 0xFFFF));
                /* NOTE(review): "i != 3" appears to exclude queue 3 from
                 * replacing an already-found winner — intent unconfirmed
                 */
                if (score < best && (winner < 0 || i != 3)) {
                        best = score;
                        winner = i;
                }
        }

        /* override winner if bursting */
        if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
            winner != wvif->wdev->tx_burst_idx &&
            !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
                                         tx_allowed_mask & urgent) &&
            wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
                winner = wvif->wdev->tx_burst_idx;

        return winner;
}
479
/*
 * Choose which queue the vif should transmit from and which links are
 * allowed. Multicast buffered behind DTIM takes precedence; otherwise
 * unicast is allowed for awake stations (plus PS-poll and U-APSD links).
 * On success fills *queue_p and *tx_allowed_mask_p and returns 0;
 * returns -ENOENT when no eligible frame is queued. *more is only set
 * on the multicast path (more than one multicast frame pending).
 * Called under wvif->ps_state_lock (see wfx_tx_queues_get()).
 */
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
                                     struct wfx_queue **queue_p,
                                     u32 *tx_allowed_mask_p,
                                     bool *more)
{
        int idx;
        u32 tx_allowed_mask;
        int total = 0;

        /* Search for a queue with multicast frames buffered */
        if (wvif->mcast_tx) {
                tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
                idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
                if (idx >= 0) {
                        *more = total > 1;
                        goto found;
                }
        }

        /* Search for unicast traffic */
        tx_allowed_mask = ~wvif->sta_asleep_mask;
        tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
        if (wvif->sta_asleep_mask) {
                /* Some stations sleep: allow PS-poll responses, but hold
                 * after-DTIM traffic until the next DTIM beacon
                 */
                tx_allowed_mask |= wvif->pspoll_mask;
                tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
        } else {
                tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
        }
        idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
        if (idx < 0)
                return -ENOENT;

found:
        *queue_p = &wvif->wdev->tx_queue[idx];
        *tx_allowed_mask_p = tx_allowed_mask;
        return 0;
}
517
/*
 * Main TX scheduler: pick the next frame to hand to the device.
 * Iterates over all vifs to agree on a queue and an allowed-links mask,
 * dequeues a frame, runs it through the pre-TX filter, and manages the
 * burst index and the 802.11 MoreData flag for buffered multicast.
 * Returns the HIF message to send, or NULL when TX is locked or no
 * frame is eligible.
 */
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
        struct sk_buff *skb;
        struct hif_msg *hif = NULL;
        struct hif_req_tx *req = NULL;
        struct wfx_queue *queue = NULL;
        struct wfx_queue *vif_queue = NULL;
        u32 tx_allowed_mask = 0;
        u32 vif_tx_allowed_mask = 0;
        const struct wfx_tx_priv *tx_priv = NULL;
        struct wfx_vif *wvif;
        /* More is used only for broadcasts. */
        bool more = false;
        bool vif_more = false;
        int not_found;
        int burst;

        for (;;) {
                int ret = -ENOENT;
                int queue_num;
                struct ieee80211_hdr *hdr;

                /* TX path is inhibited (see wfx_tx_lock()) */
                if (atomic_read(&wdev->tx_lock))
                        return NULL;

                wvif = NULL;
                while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                        spin_lock_bh(&wvif->ps_state_lock);

                        not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
                                                          &vif_tx_allowed_mask,
                                                          &vif_more);

                        /* End the multicast window when no (or only one)
                         * multicast frame remains and either we were in a
                         * multicast burst or no station is asleep
                         */
                        if (wvif->mcast_buffered && (not_found || !vif_more) &&
                                        (wvif->mcast_tx ||
                                         !wvif->sta_asleep_mask)) {
                                wvif->mcast_buffered = false;
                                if (wvif->mcast_tx) {
                                        wvif->mcast_tx = false;
                                        schedule_work(&wvif->mcast_stop_work);
                                }
                        }

                        spin_unlock_bh(&wvif->ps_state_lock);

                        if (vif_more) {
                                /* Multicast pending: serve it first */
                                more = true;
                                tx_allowed_mask = vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                                break;
                        } else if (!not_found) {
                                if (queue && queue != vif_queue)
                                        dev_info(wdev->dev, "vifs disagree about queue priority\n");
                                /* Merge the allowed links of all vifs */
                                tx_allowed_mask |= vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                        }
                }

                if (ret)
                        return NULL;

                queue_num = queue - wdev->tx_queue;

                skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
                if (!skb)
                        continue;
                tx_priv = wfx_skb_tx_priv(skb);
                hif = (struct hif_msg *) skb->data;
                wvif = wdev_to_wvif(wdev, hif->interface);
                WARN_ON(!wvif);

                if (hif_handle_tx_data(wvif, skb, queue))
                        continue;  /* Handled by WSM */

                /* This PS-poll (if any) is answered by this frame */
                wvif->pspoll_mask &= ~BIT(tx_priv->raw_link_id);

                /* allow bursting if txop is set */
                if (wvif->edca.params[queue_num].tx_op_limit)
                        burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
                else
                        burst = 1;

                /* store index of bursting queue */
                if (burst > 1)
                        wdev->tx_burst_idx = queue_num;
                else
                        wdev->tx_burst_idx = -1;

                /* more buffered multicast/broadcast frames
                 *  ==> set MoreData flag in IEEE 802.11 header
                 *  to inform PS STAs
                 */
                if (more) {
                        req = (struct hif_req_tx *) hif->body;
                        hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
                        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
                return hif;
        }
}