// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

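/*
 * Each TX frame is tracked by a pool-allocated queue item. An item cycles
 * through three lists: free_pool -> queue (waiting for TX) -> pending
 * (handed to firmware) -> free_pool again. The generation counter is
 * bumped whenever an item is reused, so stale packet IDs still held by
 * the firmware can be detected and rejected.
 */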
/* private */ struct cw1200_queue_item {
        struct list_head        head;
        struct sk_buff          *skb;
        u32                     packet_id;
        unsigned long           queue_timestamp;
        unsigned long           xmit_timestamp;
        struct cw1200_txpriv    txpriv;
        u8                      generation;
};

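/*
 * TX locking is reference counted: the mac80211 queue is stopped on the
 * first lock and woken again only when the last lock is released. Callers
 * must hold queue->lock.
 */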
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;

        if (queue->tx_locked_cnt++ == 0) {
                pr_debug("[TX] Queue %d is locked.\n",
                         queue->queue_id);
                ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
        }
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
        struct cw1200_queue_stats *stats = queue->stats;

        BUG_ON(!queue->tx_locked_cnt);
        if (--queue->tx_locked_cnt == 0) {
                pr_debug("[TX] Queue %d is unlocked.\n",
                         queue->queue_id);
                ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
        }
}

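/*
 * A packet ID packs four 8-bit fields into a u32:
 *
 *   31       24 23       16 15        8 7         0
 *  +-----------+-----------+-----------+-----------+
 *  | queue gen | queue id  | item gen  |  item id  |
 *  +-----------+-----------+-----------+-----------+
 *
 * For example, cw1200_queue_mk_packet_id(1, 2, 3, 4) yields 0x01020304,
 * and cw1200_queue_parse_id() recovers the four fields from it. The two
 * generation fields let the driver recognize IDs that refer to a reused
 * item or a cleared queue.
 */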
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
                                         u8 *queue_id, u8 *item_generation,
                                         u8 *item_id)
{
        *item_id                = (packet_id >>  0) & 0xFF;
        *item_generation        = (packet_id >>  8) & 0xFF;
        *queue_id               = (packet_id >> 16) & 0xFF;
        *queue_generation       = (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
                                            u8 item_generation, u8 item_id)
{
        return ((u32)item_id << 0) |
                ((u32)item_generation << 8) |
                ((u32)queue_id << 16) |
                ((u32)queue_generation << 24);
}

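/*
 * The skb destructor is deliberately run outside queue->lock: expired
 * items are first copied onto a private gc_list under the lock and
 * destroyed later by cw1200_queue_post_gc() with the lock dropped.
 */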
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
                                 struct list_head *gc_list)
{
        struct cw1200_queue_item *item, *tmp;

        list_for_each_entry_safe(item, tmp, gc_list, head) {
                list_del(&item->head);
                stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
                kfree(item);
        }
}

static void cw1200_queue_register_post_gc(struct list_head *gc_list,
                                          struct cw1200_queue_item *item)
{
        struct cw1200_queue_item *gc_item;

        gc_item = kmalloc(sizeof(struct cw1200_queue_item),
                          GFP_ATOMIC);
        BUG_ON(!gc_item);
        memcpy(gc_item, item, sizeof(struct cw1200_queue_item));
        list_add_tail(&gc_item->head, gc_list);
}

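/*
 * Expire frames that have been queued for longer than queue->ttl. The
 * queue list is FIFO-ordered by queue_timestamp, so the scan stops at
 * the first unexpired item. If the queue has drained to half capacity
 * the overfull state is cleared; otherwise the GC timer is re-armed for
 * the next expiry.
 */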
static void __cw1200_queue_gc(struct cw1200_queue *queue,
                              struct list_head *head,
                              bool unlock)
{
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item = NULL, *tmp;
        bool wakeup_stats = false;

        list_for_each_entry_safe(item, tmp, &queue->queue, head) {
                if (jiffies - item->queue_timestamp < queue->ttl)
                        break;
                --queue->num_queued;
                --queue->link_map_cache[item->txpriv.link_id];
                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
                cw1200_debug_tx_ttl(stats->priv);
                cw1200_queue_register_post_gc(head, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }

        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);

        if (queue->overfull) {
                if (queue->num_queued <= (queue->capacity >> 1)) {
                        queue->overfull = false;
                        if (unlock)
                                __cw1200_queue_unlock(queue);
                } else if (item) {
                        unsigned long tmo = item->queue_timestamp + queue->ttl;

                        mod_timer(&queue->gc, tmo);
                        cw1200_pm_stay_awake(&stats->priv->pm_state,
                                             tmo - jiffies);
                }
        }
}

static void cw1200_queue_gc(struct timer_list *t)
{
        LIST_HEAD(list);
        struct cw1200_queue *queue = from_timer(queue, t, gc);

        spin_lock_bh(&queue->lock);
        __cw1200_queue_gc(queue, &list, true);
        spin_unlock_bh(&queue->lock);
        cw1200_queue_post_gc(queue->stats, &list);
}

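/*
 * The stats object aggregates per-link frame counts across all queues;
 * each queue also keeps a private link_map_cache so that the shared
 * totals can be adjusted when frames enter or leave that queue.
 */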
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
                            size_t map_capacity,
                            cw1200_queue_skb_dtor_t skb_dtor,
                            struct cw1200_common *priv)
{
        memset(stats, 0, sizeof(*stats));
        stats->map_capacity = map_capacity;
        stats->skb_dtor = skb_dtor;
        stats->priv = priv;
        spin_lock_init(&stats->lock);
        init_waitqueue_head(&stats->wait_link_id_empty);

        stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!stats->link_map_cache)
                return -ENOMEM;

        return 0;
}

int cw1200_queue_init(struct cw1200_queue *queue,
                      struct cw1200_queue_stats *stats,
                      u8 queue_id,
                      size_t capacity,
                      unsigned long ttl)
{
        size_t i;

        memset(queue, 0, sizeof(*queue));
        queue->stats = stats;
        queue->capacity = capacity;
        queue->queue_id = queue_id;
        queue->ttl = ttl;
        INIT_LIST_HEAD(&queue->queue);
        INIT_LIST_HEAD(&queue->pending);
        INIT_LIST_HEAD(&queue->free_pool);
        spin_lock_init(&queue->lock);
        timer_setup(&queue->gc, cw1200_queue_gc, 0);

        queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
                              GFP_KERNEL);
        if (!queue->pool)
                return -ENOMEM;

        queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
                                        GFP_KERNEL);
        if (!queue->link_map_cache) {
                kfree(queue->pool);
                queue->pool = NULL;
                return -ENOMEM;
        }

        for (i = 0; i < capacity; ++i)
                list_add_tail(&queue->pool[i].head, &queue->free_pool);

        return 0;
}

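/*
 * Drop every queued and pending frame. Bumping queue->generation
 * invalidates all packet IDs still in flight, so late confirmations
 * from the firmware are rejected with -ENOENT.
 */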
int cw1200_queue_clear(struct cw1200_queue *queue)
{
        int i;
        LIST_HEAD(gc_list);
        struct cw1200_queue_stats *stats = queue->stats;
        struct cw1200_queue_item *item, *tmp;

        spin_lock_bh(&queue->lock);
        queue->generation++;
        list_splice_tail_init(&queue->queue, &queue->pending);
        list_for_each_entry_safe(item, tmp, &queue->pending, head) {
                WARN_ON(!item->skb);
                cw1200_queue_register_post_gc(&gc_list, item);
                item->skb = NULL;
                list_move_tail(&item->head, &queue->free_pool);
        }
        queue->num_queued = 0;
        queue->num_pending = 0;

        spin_lock_bh(&stats->lock);
        for (i = 0; i < stats->map_capacity; ++i) {
                stats->num_queued -= queue->link_map_cache[i];
                stats->link_map_cache[i] -= queue->link_map_cache[i];
                queue->link_map_cache[i] = 0;
        }
        spin_unlock_bh(&stats->lock);
        if (queue->overfull) {
                queue->overfull = false;
                __cw1200_queue_unlock(queue);
        }
        spin_unlock_bh(&queue->lock);
        wake_up(&stats->wait_link_id_empty);
        cw1200_queue_post_gc(stats, &gc_list);
        return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
        kfree(stats->link_map_cache);
        stats->link_map_cache = NULL;
}

void cw1200_queue_deinit(struct cw1200_queue *queue)
{
        cw1200_queue_clear(queue);
        del_timer_sync(&queue->gc);
        INIT_LIST_HEAD(&queue->free_pool);
        kfree(queue->pool);
        kfree(queue->link_map_cache);
        queue->pool = NULL;
        queue->link_map_cache = NULL;
        queue->capacity = 0;
}

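/*
 * Count frames waiting for transmission (queued but not yet pending)
 * for the links selected by link_id_map; the all-ones map is served
 * from the queue-wide counters as a fast path.
 */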
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
                                   u32 link_id_map)
{
        size_t ret;
        int i, bit;
        size_t map_capacity = queue->stats->map_capacity;

        if (!link_id_map)
                return 0;

        spin_lock_bh(&queue->lock);
        if (link_id_map == (u32)-1) {
                ret = queue->num_queued - queue->num_pending;
        } else {
                ret = 0;
                for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
                        if (link_id_map & bit)
                                ret += queue->link_map_cache[i];
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

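/*
 * Enqueue a frame: take an item from the free pool, stamp it with a
 * fresh packet ID and update the per-link counters. When the queue is
 * nearly full it is marked overfull and TX is locked; a few slots are
 * kept in reserve because other CPUs may be enqueuing concurrently.
 */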
int cw1200_queue_put(struct cw1200_queue *queue,
                     struct sk_buff *skb,
                     struct cw1200_txpriv *txpriv)
{
        int ret = 0;
        struct cw1200_queue_stats *stats = queue->stats;

        if (txpriv->link_id >= queue->stats->map_capacity)
                return -EINVAL;

        spin_lock_bh(&queue->lock);
        if (!WARN_ON(list_empty(&queue->free_pool))) {
                struct cw1200_queue_item *item = list_first_entry(
                        &queue->free_pool, struct cw1200_queue_item, head);
                BUG_ON(item->skb);

                list_move_tail(&item->head, &queue->queue);
                item->skb = skb;
                item->txpriv = *txpriv;
                item->generation = 0;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                item->queue_timestamp = jiffies;

                ++queue->num_queued;
                ++queue->link_map_cache[txpriv->link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[txpriv->link_id];
                spin_unlock_bh(&stats->lock);

                /* TX may happen in parallel sometimes.
                 * Leave extra queue slots so we don't overflow.
                 */
                if (!queue->overfull &&
                    queue->num_queued >=
                    (queue->capacity - (num_present_cpus() - 1))) {
                        queue->overfull = true;
                        __cw1200_queue_lock(queue);
                        mod_timer(&queue->gc, jiffies);
                }
        } else {
                ret = -ENOENT;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

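/*
 * Fetch the oldest queued frame belonging to one of the links in
 * link_id_map and move it to the pending list. The caller receives
 * pointers into the item itself, so the frame must later be either
 * requeued or removed by its packet ID.
 */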
int cw1200_queue_get(struct cw1200_queue *queue,
                     u32 link_id_map,
                     struct wsm_tx **tx,
                     struct ieee80211_tx_info **tx_info,
                     const struct cw1200_txpriv **txpriv)
{
        int ret = -ENOENT;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        bool wakeup_stats = false;

        spin_lock_bh(&queue->lock);
        list_for_each_entry(item, &queue->queue, head) {
                if (link_id_map & BIT(item->txpriv.link_id)) {
                        ret = 0;
                        break;
                }
        }

        if (!WARN_ON(ret)) {
                *tx = (struct wsm_tx *)item->skb->data;
                *tx_info = IEEE80211_SKB_CB(item->skb);
                *txpriv = &item->txpriv;
                (*tx)->packet_id = item->packet_id;
                list_move_tail(&item->head, &queue->pending);
                ++queue->num_pending;
                --queue->link_map_cache[item->txpriv.link_id];
                item->xmit_timestamp = jiffies;

                spin_lock_bh(&stats->lock);
                --stats->num_queued;
                if (!--stats->link_map_cache[item->txpriv.link_id])
                        wakeup_stats = true;
                spin_unlock_bh(&stats->lock);
        }
        spin_unlock_bh(&queue->lock);
        if (wakeup_stats)
                wake_up(&stats->wait_link_id_empty);
        return ret;
}

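/*
 * Return a pending frame to the TX queue after a failed transmission
 * attempt. The item generation is bumped and a new packet ID is written
 * into the item, so the previous ID can no longer match.
 */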
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                item->generation = ++item_generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
                                                            queue_id,
                                                            item_generation,
                                                            item_id);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
        struct cw1200_queue_item *item, *tmp;
        struct cw1200_queue_stats *stats = queue->stats;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
                --queue->num_pending;
                ++queue->link_map_cache[item->txpriv.link_id];

                spin_lock_bh(&stats->lock);
                ++stats->num_queued;
                ++stats->link_map_cache[item->txpriv.link_id];
                spin_unlock_bh(&stats->lock);

                ++item->generation;
                item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
                                                            queue->queue_id,
                                                            item->generation,
                                                            item - queue->pool);
                list_move(&item->head, &queue->queue);
        }
        spin_unlock_bh(&queue->lock);

        return 0;
}

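/*
 * Complete a transmission: release the item back to the free pool and
 * bump its generation. The skb destructor is called only after
 * queue->lock has been dropped.
 */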
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;
        struct cw1200_queue_stats *stats = queue->stats;
        struct sk_buff *gc_skb = NULL;
        struct cw1200_txpriv gc_txpriv;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                gc_txpriv = item->txpriv;
                gc_skb = item->skb;
                item->skb = NULL;
                --queue->num_pending;
                --queue->num_queued;
                ++queue->num_sent;
                ++item->generation;
                /* Do not use list_move_tail here, but list_move:
                 * try to utilize cache row.
                 */
                list_move(&item->head, &queue->free_pool);

                if (queue->overfull &&
                    (queue->num_queued <= (queue->capacity >> 1))) {
                        queue->overfull = false;
                        __cw1200_queue_unlock(queue);
                }
        }
        spin_unlock_bh(&queue->lock);

        if (gc_skb)
                stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

        return ret;
}

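/*
 * Look up a frame by packet ID without changing any queue state; the
 * same generation checks as in requeue/remove guard against stale IDs.
 */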
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
                         struct sk_buff **skb,
                         const struct cw1200_txpriv **txpriv)
{
        int ret = 0;
        u8 queue_generation, queue_id, item_generation, item_id;
        struct cw1200_queue_item *item;

        cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
                              &item_generation, &item_id);

        item = &queue->pool[item_id];

        spin_lock_bh(&queue->lock);
        BUG_ON(queue_id != queue->queue_id);
        if (queue_generation != queue->generation) {
                ret = -ENOENT;
        } else if (item_id >= (unsigned) queue->capacity) {
                WARN_ON(1);
                ret = -EINVAL;
        } else if (item->generation != item_generation) {
                WARN_ON(1);
                ret = -ENOENT;
        } else {
                *skb = item->skb;
                *txpriv = &item->txpriv;
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

void cw1200_queue_lock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_lock(queue);
        spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
        spin_lock_bh(&queue->lock);
        __cw1200_queue_unlock(queue);
        spin_unlock_bh(&queue->lock);
}

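/*
 * Report the oldest transmit timestamp among pending frames, skipping
 * pending_frame_id; returns true if any frames are pending. *timestamp
 * is only ever moved backwards, so the result can be folded across
 * several queues.
 */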
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
                                     unsigned long *timestamp,
                                     u32 pending_frame_id)
{
        struct cw1200_queue_item *item;
        bool ret;

        spin_lock_bh(&queue->lock);
        ret = !list_empty(&queue->pending);
        if (ret) {
                list_for_each_entry(item, &queue->pending, head) {
                        if (item->packet_id != pending_frame_id &&
                            time_before(item->xmit_timestamp, *timestamp))
                                *timestamp = item->xmit_timestamp;
                }
        }
        spin_unlock_bh(&queue->lock);
        return ret;
}

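/*
 * Check whether any frames are queued for the links in link_id_map,
 * with the all-ones map again short-circuiting to the global counter.
 */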
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
                                 u32 link_id_map)
{
        bool empty = true;

        spin_lock_bh(&stats->lock);
        if (link_id_map == (u32)-1) {
                empty = stats->num_queued == 0;
        } else {
                int i;

                for (i = 0; i < stats->map_capacity; ++i) {
                        if (link_id_map & BIT(i)) {
                                if (stats->link_map_cache[i]) {
                                        empty = false;
                                        break;
                                }
                        }
                }
        }
        spin_unlock_bh(&stats->lock);

        return empty;
}