staging: wfx: pspoll_mask make no sense
drivers/staging/wfx/queue.c (linux-2.6-microblaze.git)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <linux/sched.h>
#include <net/mac80211.h>

#include "queue.h"
#include "wfx.h"
#include "sta.h"
#include "data_tx.h"

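/*
 * wfx_tx_lock()/wfx_tx_unlock() maintain a nested "TX disabled" counter.
 * While the counter is non-zero, wfx_tx_queues_get() returns NULL and no new
 * frame is handed to the chip; the last unlock kicks the bottom half again.
 */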
void wfx_tx_lock(struct wfx_dev *wdev)
{
        atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
        int tx_lock = atomic_dec_return(&wdev->tx_lock);

        WARN(tx_lock < 0, "inconsistent tx_lock value");
        if (!tx_lock)
                wfx_bh_request_tx(wdev);
}

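/*
 * Wait (up to 3 seconds) for the chip to release all the TX buffers it holds.
 * On timeout, the stuck frames are dumped and the chip is declared frozen.
 */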
void wfx_tx_flush(struct wfx_dev *wdev)
{
        int ret;

        // Do not wait for any reply if chip is frozen
        if (wdev->chip_frozen)
                return;

        mutex_lock(&wdev->hif_cmd.lock);
        ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
                                 !wdev->hif.tx_buffers_used,
                                 msecs_to_jiffies(3000));
        if (!ret) {
                dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
                         wdev->hif.tx_buffers_used);
                wfx_pending_dump_old_frames(wdev, 3000);
                // FIXME: drop pending frames here
                wdev->chip_frozen = 1;
        }
        mutex_unlock(&wdev->hif_cmd.lock);
}

void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
        wfx_tx_lock(wdev);
        wfx_tx_flush(wdev);
}

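/*
 * Stop/wake the mac80211 queues for every AC. A per-queue counter allows
 * these calls to nest; the queue is only woken again when the last lock is
 * dropped.
 */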
void wfx_tx_queues_lock(struct wfx_dev *wdev)
{
        int i;
        struct wfx_queue *queue;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                queue = &wdev->tx_queue[i];
                spin_lock_bh(&queue->queue.lock);
                if (queue->tx_locked_cnt++ == 0)
                        ieee80211_stop_queue(wdev->hw, queue->queue_id);
                spin_unlock_bh(&queue->queue.lock);
        }
}

void wfx_tx_queues_unlock(struct wfx_dev *wdev)
{
        int i;
        struct wfx_queue *queue;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                queue = &wdev->tx_queue[i];
                spin_lock_bh(&queue->queue.lock);
                WARN(!queue->tx_locked_cnt, "queue already unlocked");
                if (--queue->tx_locked_cnt == 0)
                        ieee80211_wake_queue(wdev->hw, queue->queue_id);
                spin_unlock_bh(&queue->queue.lock);
        }
}

/* If successful, LOCKS the TX queue! */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
{
        int i;
        bool done;
        struct wfx_queue *queue;
        struct sk_buff *item;
        struct wfx_dev *wdev = wvif->wdev;
        struct hif_msg *hif;

        if (wvif->wdev->chip_frozen) {
                wfx_tx_lock_flush(wdev);
                wfx_tx_queues_clear(wdev);
                return;
        }

        do {
                done = true;
                wfx_tx_lock_flush(wdev);
                for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
                        queue = &wdev->tx_queue[i];
                        spin_lock_bh(&queue->queue.lock);
                        skb_queue_walk(&queue->queue, item) {
                                hif = (struct hif_msg *) item->data;
                                if (hif->interface == wvif->id)
                                        done = false;
                        }
                        spin_unlock_bh(&queue->queue.lock);
                }
                if (!done) {
                        wfx_tx_unlock(wdev);
                        msleep(20);
                }
        } while (!done);
}

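/*
 * Move every frame of @queue to @gc_list and reset the per-link counters,
 * both in the queue itself and in the global tx_queue_stats.
 */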
static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
                               struct sk_buff_head *gc_list)
{
        int i;
        struct sk_buff *item;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&queue->queue.lock);
        while ((item = __skb_dequeue(&queue->queue)) != NULL)
                skb_queue_head(gc_list, item);
        spin_lock_bh(&stats->pending.lock);
        for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->pending.lock);
        spin_unlock_bh(&queue->queue.lock);
}

void wfx_tx_queues_clear(struct wfx_dev *wdev)
{
        int i;
        struct sk_buff *item;
        struct sk_buff_head gc_list;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        skb_queue_head_init(&gc_list);
        for (i = 0; i < IEEE80211_NUM_ACS; ++i)
                wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
        wake_up(&stats->wait_link_id_empty);
        while ((item = skb_dequeue(&gc_list)) != NULL)
                wfx_skb_dtor(wdev, item);
}

void wfx_tx_queues_init(struct wfx_dev *wdev)
{
        int i;

        memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
        memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
        skb_queue_head_init(&wdev->tx_queue_stats.pending);
        init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                wdev->tx_queue[i].queue_id = i;
                skb_queue_head_init(&wdev->tx_queue[i].queue);
        }
}

void wfx_tx_queues_deinit(struct wfx_dev *wdev)
{
        WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
        wfx_tx_queues_clear(wdev);
}

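/*
 * Count the frames queued for the stations selected by @link_id_map (one bit
 * per link-id). Passing -1 counts the whole queue.
 */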
size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
                                   u32 link_id_map)
{
        size_t ret;
        int i, bit;

        if (!link_id_map)
                return 0;

        spin_lock_bh(&queue->queue.lock);
        if (link_id_map == (u32)-1) {
                ret = skb_queue_len(&queue->queue);
        } else {
                ret = 0;
                for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
                     ++i, bit <<= 1) {
                        if (link_id_map & bit)
                                ret += queue->link_map_cache[i];
                }
        }
        spin_unlock_bh(&queue->queue.lock);
        return ret;
}

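/*
 * Append @skb to @queue and account for it in the per-link counters of both
 * the queue and the global tx_queue_stats.
 */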
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
                      struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache),
             "invalid link-id value");
        spin_lock_bh(&queue->queue.lock);
        __skb_queue_tail(&queue->queue, skb);

        ++queue->link_map_cache[tx_priv->link_id];

        spin_lock_bh(&stats->pending.lock);
        ++stats->link_map_cache[tx_priv->link_id];
        spin_unlock_bh(&stats->pending.lock);
        spin_unlock_bh(&queue->queue.lock);
}

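/*
 * Dequeue the first frame of @queue whose link-id is allowed by @link_id_map,
 * timestamp it and move it to the "pending" list (frames handed to the chip
 * and waiting for their TX confirmation).
 */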
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
                                        struct wfx_queue *queue,
                                        u32 link_id_map)
{
        struct sk_buff *skb = NULL;
        struct sk_buff *item;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv;
        bool wakeup_stats = false;

        spin_lock_bh(&queue->queue.lock);
        skb_queue_walk(&queue->queue, item) {
                tx_priv = wfx_skb_tx_priv(item);
                if (link_id_map & BIT(tx_priv->link_id)) {
                        skb = item;
                        break;
                }
        }
        WARN_ON(!skb);
        if (skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                tx_priv->xmit_timestamp = ktime_get();
                __skb_unlink(skb, &queue->queue);
                --queue->link_map_cache[tx_priv->link_id];

                spin_lock_bh(&stats->pending.lock);
                __skb_queue_tail(&stats->pending, skb);
                if (!--stats->link_map_cache[tx_priv->link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->pending.lock);
        }
        spin_unlock_bh(&queue->queue.lock);
        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);
        return skb;
}

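/*
 * Put a frame taken from the "pending" list back on its original AC queue,
 * undoing the accounting done by wfx_tx_queue_get().
 */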
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
        struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];

        WARN_ON(skb_get_queue_mapping(skb) > 3);
        spin_lock_bh(&queue->queue.lock);
        ++queue->link_map_cache[tx_priv->link_id];

        spin_lock_bh(&stats->pending.lock);
        ++stats->link_map_cache[tx_priv->link_id];
        __skb_unlink(skb, &stats->pending);
        spin_unlock_bh(&stats->pending.lock);
        __skb_queue_tail(&queue->queue, skb);
        spin_unlock_bh(&queue->queue.lock);
        return 0;
}

int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&stats->pending.lock);
        __skb_unlink(skb, &stats->pending);
        spin_unlock_bh(&stats->pending.lock);
        wfx_skb_dtor(wdev, skb);

        return 0;
}

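/*
 * Retrieve (without removing it) the pending frame matching @packet_id, as
 * reported by a TX confirmation. Returns NULL and warns if it is not found.
 */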
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
        struct sk_buff *skb;
        struct hif_req_tx *req;
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;

        spin_lock_bh(&stats->pending.lock);
        skb_queue_walk(&stats->pending, skb) {
                req = wfx_skb_txreq(skb);
                if (req->packet_id == packet_id) {
                        spin_unlock_bh(&stats->pending.lock);
                        return skb;
                }
        }
        spin_unlock_bh(&stats->pending.lock);
        WARN(1, "cannot find packet in pending queue");
        return NULL;
}

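/*
 * Log every pending frame that the chip has held for more than @limit_ms
 * milliseconds. Used to diagnose firmware freezes (see wfx_tx_flush()).
 */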
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
        struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv;
        struct hif_req_tx *req;
        struct sk_buff *skb;
        bool first = true;

        spin_lock_bh(&stats->pending.lock);
        skb_queue_walk(&stats->pending, skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                req = wfx_skb_txreq(skb);
                if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
                                                  limit_ms))) {
                        if (first) {
                                dev_info(wdev->dev, "frames stuck in firmware since %ums or more:\n",
                                         limit_ms);
                                first = false;
                        }
                        dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
                                 req->packet_id,
                                 ktime_ms_delta(now, tx_priv->xmit_timestamp));
                }
        }
        spin_unlock_bh(&stats->pending.lock);
}

unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
                                          struct sk_buff *skb)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
{
        int i;
        struct sk_buff_head *queue;
        bool ret = true;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                queue = &wdev->tx_queue[i].queue;
                spin_lock_bh(&queue->lock);
                if (!skb_queue_empty(queue))
                        ret = false;
                spin_unlock_bh(&queue->lock);
        }
        return ret;
}

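/*
 * Last-minute filtering of a frame picked from the queues. Depending on the
 * interface state it may be dropped, or diverted to the WEP re-key work when
 * it requires a different default WEP key. Returns true if the frame was
 * consumed here and must not be sent as-is.
 */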
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
                               struct wfx_queue *queue)
{
        bool handled = false;
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
        struct hif_req_tx *req = wfx_skb_txreq(skb);
        struct ieee80211_hdr *frame =
                (struct ieee80211_hdr *)(req->frame + req->data_flags.fc_offset);

        enum {
                do_probe,
                do_drop,
                do_wep,
                do_tx,
        } action = do_tx;

        switch (wvif->vif->type) {
        case NL80211_IFTYPE_STATION:
                if (wvif->state < WFX_STATE_PRE_STA)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_AP:
                if (!wvif->state)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_ADHOC:
                if (wvif->state != WFX_STATE_IBSS)
                        action = do_drop;
                break;
        case NL80211_IFTYPE_MONITOR:
        default:
                action = do_drop;
                break;
        }

        if (action == do_tx) {
                if (ieee80211_is_nullfunc(frame->frame_control)) {
                        mutex_lock(&wvif->bss_loss_lock);
                        if (wvif->bss_loss_state) {
                                wvif->bss_loss_confirm_id = req->packet_id;
                                req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
                        }
                        mutex_unlock(&wvif->bss_loss_lock);
                } else if (ieee80211_has_protected(frame->frame_control) &&
                           tx_priv->hw_key &&
                           tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
                           (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
                            tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
                        action = do_wep;
                }
        }

        switch (action) {
        case do_drop:
                wfx_pending_remove(wvif->wdev, skb);
                handled = true;
                break;
        case do_wep:
                wfx_tx_lock(wvif->wdev);
                WARN_ON(wvif->wep_pending_skb);
                wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
                wvif->wep_pending_skb = skb;
                if (!schedule_work(&wvif->wep_key_work))
                        wfx_tx_unlock(wvif->wdev);
                handled = true;
                break;
        case do_tx:
                break;
        default:
                /* Do nothing */
                break;
        }
        return handled;
}

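/*
 * Pick the AC to serve next: each non-empty queue gets a score derived from
 * its EDCA parameters plus a random component, and the lowest score wins.
 * An ongoing burst on another queue may override the result. Also accumulates
 * in @total the number of frames queued for @tx_allowed_mask across all ACs.
 */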
static int wfx_get_prio_queue(struct wfx_vif *wvif,
                              u32 tx_allowed_mask, int *total)
{
        static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
                BIT(WFX_LINK_ID_UAPSD);
        const struct ieee80211_tx_queue_params *edca;
        unsigned int score, best = -1;
        int winner = -1;
        int i;

        /* search for a winner using edca params */
        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                int queued;

                edca = &wvif->edca_params[i];
                queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
                                tx_allowed_mask);
                if (!queued)
                        continue;
                *total += queued;
                score = ((edca->aifs + edca->cw_min) << 16) +
                        ((edca->cw_max - edca->cw_min) *
                         (get_random_int() & 0xFFFF));
                if (score < best && (winner < 0 || i != 3)) {
                        best = score;
                        winner = i;
                }
        }

        /* override winner if bursting */
        if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
            winner != wvif->wdev->tx_burst_idx &&
            !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
                                         tx_allowed_mask & urgent) &&
            wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx],
                                        tx_allowed_mask))
                winner = wvif->wdev->tx_burst_idx;

        return winner;
}

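/*
 * Choose which frames may be sent now: multicast traffic buffered for after
 * the DTIM first, otherwise unicast traffic for stations that are awake (plus
 * U-APSD). Returns the selected queue and the allowed link-id mask, or
 * -ENOENT if nothing is eligible.
 */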
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
                                 struct wfx_queue **queue_p,
                                 u32 *tx_allowed_mask_p,
                                 bool *more)
{
        int idx;
        u32 tx_allowed_mask;
        int total = 0;

        /* Search for a queue with multicast frames buffered */
        if (wvif->mcast_tx) {
                tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
                idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
                if (idx >= 0) {
                        *more = total > 1;
                        goto found;
                }
        }

        /* Search for unicast traffic */
        tx_allowed_mask = ~wvif->sta_asleep_mask;
        tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
        if (wvif->sta_asleep_mask)
                tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
        else
                tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
        idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
        if (idx < 0)
                return -ENOENT;

found:
        *queue_p = &wvif->wdev->tx_queue[idx];
        *tx_allowed_mask_p = tx_allowed_mask;
        return 0;
}

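/*
 * Main dequeue entry point of the TX path: pick the most urgent frame among
 * all vifs and ACs, apply the last-minute filtering of hif_handle_tx_data()
 * and, when more multicast frames are buffered, set the MoreData bit.
 * Returns NULL when TX is locked or no frame is eligible.
 */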
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
        struct sk_buff *skb;
        struct hif_msg *hif = NULL;
        struct hif_req_tx *req = NULL;
        struct wfx_queue *queue = NULL;
        struct wfx_queue *vif_queue = NULL;
        u32 tx_allowed_mask = 0;
        u32 vif_tx_allowed_mask = 0;
        const struct wfx_tx_priv *tx_priv = NULL;
        struct wfx_vif *wvif;
        /* More is used only for broadcasts. */
        bool more = false;
        bool vif_more = false;
        int not_found;
        int burst;

        for (;;) {
                int ret = -ENOENT;
                int queue_num;
                struct ieee80211_hdr *hdr;

                if (atomic_read(&wdev->tx_lock))
                        return NULL;

                wvif = NULL;
                while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                        spin_lock_bh(&wvif->ps_state_lock);

                        not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
                                                          &vif_tx_allowed_mask,
                                                          &vif_more);

                        if (wvif->mcast_buffered && (not_found || !vif_more) &&
                            (wvif->mcast_tx || !wvif->sta_asleep_mask)) {
                                wvif->mcast_buffered = false;
                                if (wvif->mcast_tx) {
                                        wvif->mcast_tx = false;
                                        schedule_work(&wvif->mcast_stop_work);
                                }
                        }

                        spin_unlock_bh(&wvif->ps_state_lock);

                        if (vif_more) {
                                more = true;
                                tx_allowed_mask = vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                                break;
                        } else if (!not_found) {
                                if (queue && queue != vif_queue)
                                        dev_info(wdev->dev, "vifs disagree about queue priority\n");
                                tx_allowed_mask |= vif_tx_allowed_mask;
                                queue = vif_queue;
                                ret = 0;
                        }
                }

                if (ret)
                        return NULL;

                queue_num = queue - wdev->tx_queue;

                skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
                if (!skb)
                        continue;
                tx_priv = wfx_skb_tx_priv(skb);
                hif = (struct hif_msg *) skb->data;
                wvif = wdev_to_wvif(wdev, hif->interface);
                WARN_ON(!wvif);

                if (hif_handle_tx_data(wvif, skb, queue))
                        continue;  /* Handled by WSM */

                /* allow bursting if txop is set */
                if (wvif->edca_params[queue_num].txop)
                        burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
                else
                        burst = 1;

                /* store index of bursting queue */
                if (burst > 1)
                        wdev->tx_burst_idx = queue_num;
                else
                        wdev->tx_burst_idx = -1;

                /* more buffered multicast/broadcast frames
                 *  ==> set MoreData flag in IEEE 802.11 header
                 *  to inform PS STAs
                 */
                if (more) {
                        req = (struct hif_req_tx *) hif->body;
                        hdr = (struct ieee80211_hdr *)(req->frame +
                                                       req->data_flags.fc_offset);
                        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                }
                return hif;
        }
}