// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <net/mac80211.h>

#include "wfx.h"
#include "traces.h"

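/*
 * The TX lock is a reference counter: transmission is allowed only while the
 * counter is zero. wfx_tx_lock() can be nested; the last matching
 * wfx_tx_unlock() kicks the bottom half so frames queued in the meantime get
 * sent.
 */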
void wfx_tx_lock(struct wfx_dev *wdev)
{
        atomic_inc(&wdev->tx_lock);
}

void wfx_tx_unlock(struct wfx_dev *wdev)
{
        int tx_lock = atomic_dec_return(&wdev->tx_lock);

        WARN(tx_lock < 0, "inconsistent tx_lock value");
        if (!tx_lock)
                wfx_bh_request_tx(wdev);
}

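/*
 * Wait up to 3 seconds for the device to return all of its TX buffers. If the
 * timeout expires, the stuck frames are dumped for debug and the chip is
 * declared frozen: no further reply is expected from it.
 */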
void wfx_tx_flush(struct wfx_dev *wdev)
{
        int ret;

        // Do not wait for any reply if the chip is frozen
        if (wdev->chip_frozen)
                return;

        wfx_tx_lock(wdev);
        mutex_lock(&wdev->hif_cmd.lock);
        ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
                                 !wdev->hif.tx_buffers_used,
                                 msecs_to_jiffies(3000));
        if (!ret) {
                dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
                         wdev->hif.tx_buffers_used);
                wfx_pending_dump_old_frames(wdev, 3000);
                // FIXME: drop pending frames here
                wdev->chip_frozen = true;
        }
        mutex_unlock(&wdev->hif_cmd.lock);
        wfx_tx_unlock(wdev);
}

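/*
 * Sketch of the intended usage (hypothetical caller): take the lock and drain
 * in-flight frames before touching device state, then release:
 *
 *	wfx_tx_lock_flush(wdev);
 *	// ... reconfigure the device ...
 *	wfx_tx_unlock(wdev);
 */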
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
        wfx_tx_lock(wdev);
        wfx_tx_flush(wdev);
}

void wfx_tx_queues_init(struct wfx_vif *wvif)
{
        // The device is in charge of respecting the details of the QoS
        // parameters. The driver just ensures that the priorities are roughly
        // respected to avoid starving any queue.
        const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                skb_queue_head_init(&wvif->tx_queue[i].normal);
                skb_queue_head_init(&wvif->tx_queue[i].cab);
                wvif->tx_queue[i].priority = priorities[i];
        }
}

void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
                WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
                WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].normal));
                WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].cab));
        }
}

bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
{
        return skb_queue_empty(&queue->normal) && skb_queue_empty(&queue->cab);
}

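/*
 * Move every frame of @skb_queue onto @dropped. Frames are pushed at the head
 * of @dropped, so the resulting list is in reverse queueing order.
 */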
static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
                                struct sk_buff_head *skb_queue,
                                struct sk_buff_head *dropped)
{
        struct sk_buff *skb, *tmp;

        spin_lock_bh(&skb_queue->lock);
        skb_queue_walk_safe(skb_queue, skb, tmp) {
                __skb_unlink(skb, skb_queue);
                skb_queue_head(dropped, skb);
        }
        spin_unlock_bh(&skb_queue->lock);
}

void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
                       struct sk_buff_head *dropped)
{
        __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
        __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
        wake_up(&wvif->wdev->tx_dequeue);
}

void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
{
        struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

        if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
                skb_queue_tail(&queue->cab, skb);
        else
                skb_queue_tail(&queue->normal, skb);
}

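/*
 * Requeue onto @dropped all frames already handed to the device, fixing up
 * the per-queue pending counters. Only meaningful once the chip is frozen,
 * since the device will never report these frames back.
 */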
void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
{
        struct wfx_queue *queue;
        struct wfx_vif *wvif;
        struct hif_msg *hif;
        struct sk_buff *skb;

        WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
             __func__);
        while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
                hif = (struct hif_msg *)skb->data;
                wvif = wdev_to_wvif(wdev, hif->interface);
                if (wvif) {
                        queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
                        WARN_ON(skb_get_queue_mapping(skb) > 3);
                        WARN_ON(!atomic_read(&queue->pending_frames));
                        atomic_dec(&queue->pending_frames);
                }
                skb_queue_head(dropped, skb);
        }
}

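/*
 * Look up a frame in tx_pending from the packet_id reported by the device and
 * remove it from the queue. The queue lock is released before skb_unlink(),
 * which takes it again internally.
 */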
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
        struct wfx_queue *queue;
        struct hif_req_tx *req;
        struct wfx_vif *wvif;
        struct hif_msg *hif;
        struct sk_buff *skb;

        spin_lock_bh(&wdev->tx_pending.lock);
        skb_queue_walk(&wdev->tx_pending, skb) {
                hif = (struct hif_msg *)skb->data;
                req = (struct hif_req_tx *)hif->body;
                if (req->packet_id != packet_id)
                        continue;
                spin_unlock_bh(&wdev->tx_pending.lock);
                wvif = wdev_to_wvif(wdev, hif->interface);
                if (wvif) {
                        queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
                        WARN_ON(skb_get_queue_mapping(skb) > 3);
                        WARN_ON(!atomic_read(&queue->pending_frames));
                        atomic_dec(&queue->pending_frames);
                }
                skb_unlink(skb, &wdev->tx_pending);
                return skb;
        }
        spin_unlock_bh(&wdev->tx_pending.lock);
        WARN(1, "cannot find packet in pending queue");
        return NULL;
}

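/*
 * Log every pending frame older than @limit_ms. In particular, wfx_tx_flush()
 * calls this when the device fails to drain its buffers in time.
 */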
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv;
        struct hif_req_tx *req;
        struct sk_buff *skb;
        bool first = true;

        spin_lock_bh(&wdev->tx_pending.lock);
        skb_queue_walk(&wdev->tx_pending, skb) {
                tx_priv = wfx_skb_tx_priv(skb);
                req = wfx_skb_txreq(skb);
                if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
                                                  limit_ms))) {
                        if (first) {
                                dev_info(wdev->dev, "frames stuck in firmware for %ums or more:\n",
                                         limit_ms);
                                first = false;
                        }
                        dev_info(wdev->dev, "   id %08x sent %lldms ago\n",
                                 req->packet_id,
                                 ktime_ms_delta(now, tx_priv->xmit_timestamp));
                }
        }
        spin_unlock_bh(&wdev->tx_pending.lock);
}

unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
                                          struct sk_buff *skb)
{
        ktime_t now = ktime_get();
        struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);

        return ktime_us_delta(now, tx_priv->xmit_timestamp);
}

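/*
 * "cab" stands for "content after (DTIM) beacon": multicast frames that must
 * be held until the next DTIM beacon has been sent.
 */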
bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
{
        int i;

        if (wvif->vif->type != NL80211_IFTYPE_AP)
                return false;
        for (i = 0; i < IEEE80211_NUM_ACS; ++i)
                // Note: since only an AP can have mcast frames in queue and
                // only one vif can be AP, all queued frames have the same
                // interface id
                if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
                        return true;
        return false;
}

static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
{
        return atomic_read(&queue->pending_frames) * queue->priority;
}

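/*
 * Frame scheduling happens in two passes over the queues, sorted by ascending
 * weight (in-flight frames times the per-AC priority factor, so background
 * traffic is throttled harder than voice): first the "cab" queues while
 * after-DTIM transmission is allowed, then the normal queues.
 */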
static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
{
        struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
        int i, j, num_queues = 0;
        struct wfx_vif *wvif;
        struct hif_msg *hif;
        struct sk_buff *skb;

        // sort the queues
        wvif = NULL;
        while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        WARN_ON(num_queues >= ARRAY_SIZE(queues));
                        queues[num_queues] = &wvif->tx_queue[i];
                        for (j = num_queues; j > 0; j--)
                                if (wfx_tx_queue_get_weight(queues[j]) <
                                    wfx_tx_queue_get_weight(queues[j - 1]))
                                        swap(queues[j - 1], queues[j]);
                        num_queues++;
                }
        }

        wvif = NULL;
        while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
                if (!wvif->after_dtim_tx_allowed)
                        continue;
                for (i = 0; i < num_queues; i++) {
                        skb = skb_dequeue(&queues[i]->cab);
                        if (!skb)
                                continue;
                        // Note: since only an AP can have mcast frames in
                        // queue and only one vif can be AP, all queued
                        // frames have the same interface id
                        hif = (struct hif_msg *)skb->data;
                        WARN_ON(hif->interface != wvif->id);
                        WARN_ON(queues[i] !=
                                &wvif->tx_queue[skb_get_queue_mapping(skb)]);
                        atomic_inc(&queues[i]->pending_frames);
                        trace_queues_stats(wdev, queues[i]);
                        return skb;
                }
                // No more multicast frames to send
                wvif->after_dtim_tx_allowed = false;
                schedule_work(&wvif->update_tim_work);
        }

        for (i = 0; i < num_queues; i++) {
                skb = skb_dequeue(&queues[i]->normal);
                if (skb) {
                        atomic_inc(&queues[i]->pending_frames);
                        trace_queues_stats(wdev, queues[i]);
                        return skb;
                }
        }
        return NULL;
}

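/*
 * Entry point for the TX bottom half: fetch the next frame to send. Returns
 * NULL while TX is locked or when no frame is ready; otherwise the frame is
 * moved to tx_pending and timestamped before its HIF message is returned.
 */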
struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
{
        struct wfx_tx_priv *tx_priv;
        struct sk_buff *skb;

        if (atomic_read(&wdev->tx_lock))
                return NULL;
        skb = wfx_tx_queues_get_skb(wdev);
        if (!skb)
                return NULL;
        skb_queue_tail(&wdev->tx_pending, skb);
        wake_up(&wdev->tx_dequeue);
        tx_priv = wfx_skb_tx_priv(skb);
        tx_priv->xmit_timestamp = ktime_get();
        return (struct hif_msg *)skb->data;
}