1 // SPDX-License-Identifier: GPL-2.0-only
3 * O(1) TX queue with built-in allocator.
5 * Copyright (c) 2017-2019, Silicon Laboratories, Inc.
6 * Copyright (c) 2010, ST-Ericsson
8 #include <linux/sched.h>
9 #include <net/mac80211.h>
/*
 * Pause TX traffic toward the chip. Ref-counted: each call increments
 * wdev->tx_lock and must be balanced by a later wfx_tx_unlock().
 */
void wfx_tx_lock(struct wfx_dev *wdev)
	atomic_inc(&wdev->tx_lock);
/*
 * Drop one TX lock reference taken by wfx_tx_lock(). Warns on an
 * unbalanced unlock and asks the bottom half to resume transmission.
 */
void wfx_tx_unlock(struct wfx_dev *wdev)
	int tx_lock = atomic_dec_return(&wdev->tx_lock);
	/* a negative counter means an unlock without a matching lock */
	WARN(tx_lock < 0, "inconsistent tx_lock value");
	/* kick the bottom half so frames held back while locked go out */
	wfx_bh_request_tx(wdev);
/*
 * Wait (up to 3 seconds) for the chip to hand back all in-flight TX
 * buffers. On timeout, the stale frames are dumped to the log and the
 * chip is marked frozen so later waiters bail out immediately.
 */
void wfx_tx_flush(struct wfx_dev *wdev)
	// Do not wait for any reply if chip is frozen
	if (wdev->chip_frozen)
	/* hold the host-interface command lock while draining */
	mutex_lock(&wdev->hif_cmd.lock);
	ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
				 !wdev->hif.tx_buffers_used,
				 msecs_to_jiffies(3000));
	/* timed out: firmware still owns tx_buffers_used buffers */
	dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
		 wdev->hif.tx_buffers_used);
	/* same 3000 ms threshold as the wait above */
	wfx_pending_dump_old_frames(wdev, 3000);
	// FIXME: drop pending frames here
	wdev->chip_frozen = 1;
	mutex_unlock(&wdev->hif_cmd.lock);
/*
 * NOTE(review): body not visible in this chunk — per its name and its
 * callers below, presumably wfx_tx_lock() followed by wfx_tx_flush();
 * confirm against the full source.
 */
void wfx_tx_lock_flush(struct wfx_dev *wdev)
/*
 * Stop all mac80211 AC queues. Ref-counted per queue via tx_locked_cnt,
 * so nested lock/unlock pairs are safe: only the first lock actually
 * stops the queue.
 */
void wfx_tx_queues_lock(struct wfx_dev *wdev)
	struct wfx_queue *queue;
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		/* 0 -> 1 transition: tell mac80211 to stop feeding us */
		if (queue->tx_locked_cnt++ == 0)
			ieee80211_stop_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
/*
 * Counterpart of wfx_tx_queues_lock(): drop one lock reference per AC
 * queue and wake the mac80211 queue when the count reaches zero.
 */
void wfx_tx_queues_unlock(struct wfx_dev *wdev)
	struct wfx_queue *queue;
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		WARN(!queue->tx_locked_cnt, "queue already unlocked");
		/* 1 -> 0 transition: let mac80211 feed us again */
		if (--queue->tx_locked_cnt == 0)
			ieee80211_wake_queue(wdev->hw, queue->queue_id);
		spin_unlock_bh(&queue->queue.lock);
/* If successful, LOCKS the TX queue! */
/*
 * Wait until no queued frame belongs to @wvif. If the chip is frozen the
 * queues can never drain, so they are locked, flushed and cleared instead.
 */
void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
	struct wfx_queue *queue;
	struct wfx_dev *wdev = wvif->wdev;
	/* frozen chip: drop everything rather than wait forever */
	if (wvif->wdev->chip_frozen) {
		wfx_tx_lock_flush(wdev);
		wfx_tx_queues_clear(wdev);
	wfx_tx_lock_flush(wdev);
	for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
		queue = &wdev->tx_queue[i];
		spin_lock_bh(&queue->queue.lock);
		/* scan for a frame still owned by this interface */
		skb_queue_walk(&queue->queue, item) {
			hif = (struct hif_msg *) item->data;
			/* NOTE(review): body of this match not visible here */
			if (hif->interface == wvif->id)
		spin_unlock_bh(&queue->queue.lock);
/*
 * Move every frame of @queue onto @gc_list (to be destroyed by the
 * caller outside the locks) and zero the queue's per-link counters,
 * subtracting them from the global stats as well.
 */
static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
			       struct sk_buff_head *gc_list)
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	spin_lock_bh(&queue->queue.lock);
	while ((item = __skb_dequeue(&queue->queue)) != NULL)
		skb_queue_head(gc_list, item);
	/* lock order here: stats->pending.lock nests inside queue->queue.lock */
	spin_lock_bh(&stats->pending.lock);
	for (i = 0; i < ARRAY_SIZE(stats->link_map_cache); ++i) {
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
/*
 * Drop every frame from all AC queues. Frames are first collected on a
 * local gc_list under the queue locks, then destroyed lock-free.
 */
void wfx_tx_queues_clear(struct wfx_dev *wdev)
	struct sk_buff *item;
	struct sk_buff_head gc_list;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	skb_queue_head_init(&gc_list);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i)
		wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
	/* per-link counters are now zero: wake anyone waiting for that */
	wake_up(&stats->wait_link_id_empty);
	/* destroy the collected frames outside of any queue lock */
	while ((item = skb_dequeue(&gc_list)) != NULL)
		wfx_skb_dtor(wdev, item);
/*
 * Initialize the TX queues and global stats: one wfx_queue per 802.11
 * access category, plus the shared "pending" (in-flight) list.
 */
void wfx_tx_queues_init(struct wfx_dev *wdev)
	memset(&wdev->tx_queue_stats, 0, sizeof(wdev->tx_queue_stats));
	memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
	skb_queue_head_init(&wdev->tx_queue_stats.pending);
	init_waitqueue_head(&wdev->tx_queue_stats.wait_link_id_empty);
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		/* queue_id maps 1:1 onto the AC index */
		wdev->tx_queue[i].queue_id = i;
		skb_queue_head_init(&wdev->tx_queue[i].queue);
/*
 * Teardown: by now no frame should still be in flight (hence the WARN
 * before clearing); any frames left on the AC queues are dropped.
 */
void wfx_tx_queues_deinit(struct wfx_dev *wdev)
	WARN_ON(!skb_queue_empty(&wdev->tx_queue_stats.pending));
	wfx_tx_queues_clear(wdev);
/*
 * Count the frames queued on @queue for the links selected by the
 * link_id_map bitmask; (u32)-1 means "all links" and short-circuits to
 * the plain queue length instead of summing the per-link cache.
 */
size_t wfx_tx_queue_get_num_queued(struct wfx_queue *queue,
	spin_lock_bh(&queue->queue.lock);
	if (link_id_map == (u32)-1) {
		/* fast path: every link requested */
		ret = skb_queue_len(&queue->queue);
	for (i = 0, bit = 1; i < ARRAY_SIZE(queue->link_map_cache);
		if (link_id_map & bit)
			ret += queue->link_map_cache[i];
	spin_unlock_bh(&queue->queue.lock);
/*
 * Append @skb to @queue, bumping the per-link counter both on the queue
 * itself and in the device-wide stats (kept consistent under both locks).
 */
void wfx_tx_queue_put(struct wfx_dev *wdev, struct wfx_queue *queue,
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	/* link_id indexes link_map_cache; reject out-of-range values loudly */
	WARN(tx_priv->link_id >= ARRAY_SIZE(stats->link_map_cache), "invalid link-id value");
	spin_lock_bh(&queue->queue.lock);
	__skb_queue_tail(&queue->queue, skb);
	++queue->link_map_cache[tx_priv->link_id];
	/* mirror the counter in the global stats, under its own lock */
	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
/*
 * Pick the first frame on @queue whose link is allowed by link_id_map,
 * move it to the global "pending" list (i.e. in flight toward firmware)
 * and timestamp it. Wakes wait_link_id_empty when a link's global
 * counter drains to zero.
 */
static struct sk_buff *wfx_tx_queue_get(struct wfx_dev *wdev,
					struct wfx_queue *queue,
	struct sk_buff *skb = NULL;
	struct sk_buff *item;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv;
	bool wakeup_stats = false;
	spin_lock_bh(&queue->queue.lock);
	/* linear scan for the first frame matching an allowed link */
	skb_queue_walk(&queue->queue, item) {
		tx_priv = wfx_skb_tx_priv(item);
		if (link_id_map & BIT(tx_priv->link_id)) {
	/* record when the frame was handed over, for stale-frame dumps */
	tx_priv = wfx_skb_tx_priv(skb);
	tx_priv->xmit_timestamp = ktime_get();
	__skb_unlink(skb, &queue->queue);
	--queue->link_map_cache[tx_priv->link_id];
	/* move the frame onto the device-wide pending list */
	spin_lock_bh(&stats->pending.lock);
	__skb_queue_tail(&stats->pending, skb);
	if (!--stats->link_map_cache[tx_priv->link_id])
	spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&queue->queue.lock);
	/* wake-up is done after releasing the locks */
	wake_up(&stats->wait_link_id_empty);
/*
 * Put @skb — currently on the global pending list — back onto its AC
 * queue for retransmission, restoring both per-link counters.
 */
int wfx_pending_requeue(struct wfx_dev *wdev, struct sk_buff *skb)
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct wfx_queue *queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
	/* queue mapping must be one of the four ACs */
	WARN_ON(skb_get_queue_mapping(skb) > 3);
	spin_lock_bh(&queue->queue.lock);
	++queue->link_map_cache[tx_priv->link_id];
	spin_lock_bh(&stats->pending.lock);
	++stats->link_map_cache[tx_priv->link_id];
	/* detach from the pending list before re-queuing */
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	__skb_queue_tail(&queue->queue, skb);
	spin_unlock_bh(&queue->queue.lock);
/*
 * Remove @skb from the global pending list and destroy it (the chip is
 * done with this frame — completed or abandoned).
 */
int wfx_pending_remove(struct wfx_dev *wdev, struct sk_buff *skb)
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	spin_lock_bh(&stats->pending.lock);
	__skb_unlink(skb, &stats->pending);
	spin_unlock_bh(&stats->pending.lock);
	/* free outside the lock */
	wfx_skb_dtor(wdev, skb);
/*
 * Look up the in-flight frame whose host-interface TX request carries
 * @packet_id. Warns when the id matches nothing on the pending list.
 */
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
	struct hif_req_tx *req;
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		req = wfx_skb_txreq(skb);
		if (req->packet_id == packet_id) {
			/* found: release the lock before handing it back */
			spin_unlock_bh(&stats->pending.lock);
	spin_unlock_bh(&stats->pending.lock);
	WARN(1, "cannot find packet in pending queue");
/*
 * Log every pending frame that the firmware has held for more than
 * @limit_ms milliseconds — a debugging aid for stuck-TX diagnostics.
 */
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
	struct wfx_queue_stats *stats = &wdev->tx_queue_stats;
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv;
	struct hif_req_tx *req;
	spin_lock_bh(&stats->pending.lock);
	skb_queue_walk(&stats->pending, skb) {
		tx_priv = wfx_skb_tx_priv(skb);
		req = wfx_skb_txreq(skb);
		/* frame older than the limit? */
		if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
			/* NOTE(review): header line, presumably printed once */
			dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
		/* one line per stale frame: packet id and its age */
		dev_info(wdev->dev, " id %08x sent %lldms ago\n",
			 ktime_ms_delta(now, tx_priv->xmit_timestamp));
	spin_unlock_bh(&stats->pending.lock);
/*
 * Return how many microseconds have elapsed since @skb was handed to
 * the firmware (xmit_timestamp set in wfx_tx_queue_get()).
 */
unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
	ktime_t now = ktime_get();
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	return ktime_us_delta(now, tx_priv->xmit_timestamp);
/*
 * True when no AC queue holds a frame. Each queue is checked under its
 * own lock; the non-empty early-exit path is not visible in this chunk.
 */
bool wfx_tx_queues_is_empty(struct wfx_dev *wdev)
	struct sk_buff_head *queue;
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		queue = &wdev->tx_queue[i].queue;
		spin_lock_bh(&queue->lock);
		/* NOTE(review): presumably unlocks and returns false here */
		if (!skb_queue_empty(queue))
		spin_unlock_bh(&queue->lock);
/*
 * Pre-process a frame about to be sent: depending on interface type and
 * state, the frame may be transmitted, dropped, or deferred (e.g. while
 * a WEP default-key change is in flight). Returns whether the frame was
 * consumed here instead of being handed to the caller for TX.
 * NOTE(review): several branch bodies and the "action" state machine are
 * not visible in this chunk — comments below are hedged accordingly.
 */
static bool hif_handle_tx_data(struct wfx_vif *wvif, struct sk_buff *skb,
			       struct wfx_queue *queue)
	bool handled = false;
	struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
	struct hif_req_tx *req = wfx_skb_txreq(skb);
	/* the 802.11 header lives inside the HIF request at fc_offset */
	struct ieee80211_hdr *frame = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
	/* decide per interface type whether TX is allowed in this state */
	switch (wvif->vif->type) {
	case NL80211_IFTYPE_STATION:
		if (wvif->state < WFX_STATE_PRE_STA)
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		if (wvif->state != WFX_STATE_IBSS)
	case NL80211_IFTYPE_MONITOR:
	if (action == do_tx) {
		if (ieee80211_is_nullfunc(frame->frame_control)) {
			mutex_lock(&wvif->bss_loss_lock);
			/* during BSS-loss probing, track this NULL frame's
			 * confirmation and send it on the voice queue */
			if (wvif->bss_loss_state) {
				wvif->bss_loss_confirm_id = req->packet_id;
				req->queue_id.queue_id = HIF_QUEUE_ID_VOICE;
			mutex_unlock(&wvif->bss_loss_lock);
		} else if (ieee80211_has_protected(frame->frame_control) &&
			   tx_priv->hw_key->keyidx != wvif->wep_default_key_id &&
			   (tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
			    tx_priv->hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) {
	/* frame consumed here: drop it from the in-flight list */
	wfx_pending_remove(wvif->wdev, skb);
	/* WEP key must change before this frame may go out: park the skb,
	 * hold TX, and let wep_key_work transmit it after the key update */
	wfx_tx_lock(wvif->wdev);
	WARN_ON(wvif->wep_pending_skb);
	wvif->wep_default_key_id = tx_priv->hw_key->keyidx;
	wvif->wep_pending_skb = skb;
	/* work already scheduled: drop the extra TX lock reference */
	if (!schedule_work(&wvif->wep_key_work))
		wfx_tx_unlock(wvif->wdev);
/*
 * Choose which AC queue should transmit next, emulating EDCA contention:
 * each non-empty queue gets a randomized score from its aifs/cw_min/
 * cw_max parameters and the lowest score wins. An ongoing TX burst can
 * override the winner to keep bursting on the same queue.
 */
static int wfx_get_prio_queue(struct wfx_vif *wvif,
			      u32 tx_allowed_mask, int *total)
	/* links whose traffic must not be starved by bursting */
	static const int urgent = BIT(WFX_LINK_ID_AFTER_DTIM) |
				  BIT(WFX_LINK_ID_UAPSD);
	const struct ieee80211_tx_queue_params *edca;
	unsigned int score, best = -1;
	/* search for a winner using edca params */
	for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
		edca = &wvif->edca_params[i];
		queued = wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[i],
		/* lower score == higher priority; the random term models
		 * the contention-window backoff draw */
		score = ((edca->aifs + edca->cw_min) << 16) +
			((edca->cw_max - edca->cw_min) *
			 (get_random_int() & 0xFFFF));
		if (score < best && (winner < 0 || i != 3)) {
	/* override winner if bursting */
	if (winner >= 0 && wvif->wdev->tx_burst_idx >= 0 &&
	    winner != wvif->wdev->tx_burst_idx &&
	    !wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[winner],
					 tx_allowed_mask & urgent) &&
	    wfx_tx_queue_get_num_queued(&wvif->wdev->tx_queue[wvif->wdev->tx_burst_idx], tx_allowed_mask))
		winner = wvif->wdev->tx_burst_idx;
/*
 * Compute which links may transmit for @wvif and pick the AC queue to
 * serve next (via wfx_get_prio_queue()). Multicast pending after DTIM
 * takes precedence over unicast; outputs go through *queue_p and
 * *tx_allowed_mask_p.
 */
static int wfx_tx_queue_mask_get(struct wfx_vif *wvif,
				 struct wfx_queue **queue_p,
				 u32 *tx_allowed_mask_p,
	/* Search for a queue with multicast frames buffered */
	if (wvif->mcast_tx) {
		tx_allowed_mask = BIT(WFX_LINK_ID_AFTER_DTIM);
		idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
	/* Search for unicast traffic */
	tx_allowed_mask = ~wvif->sta_asleep_mask;
	tx_allowed_mask |= BIT(WFX_LINK_ID_UAPSD);
	/* with stations asleep, post-DTIM traffic is handled separately */
	if (wvif->sta_asleep_mask)
		tx_allowed_mask &= ~BIT(WFX_LINK_ID_AFTER_DTIM);
	tx_allowed_mask |= BIT(WFX_LINK_ID_AFTER_DTIM);
	idx = wfx_get_prio_queue(wvif, tx_allowed_mask, &total);
	*queue_p = &wvif->wdev->tx_queue[idx];
	*tx_allowed_mask_p = tx_allowed_mask;
510 struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
513 struct hif_msg *hif = NULL;
514 struct hif_req_tx *req = NULL;
515 struct wfx_queue *queue = NULL;
516 struct wfx_queue *vif_queue = NULL;
517 u32 tx_allowed_mask = 0;
518 u32 vif_tx_allowed_mask = 0;
519 const struct wfx_tx_priv *tx_priv = NULL;
520 struct wfx_vif *wvif;
521 /* More is used only for broadcasts. */
523 bool vif_more = false;
530 struct ieee80211_hdr *hdr;
532 if (atomic_read(&wdev->tx_lock))
536 while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
537 spin_lock_bh(&wvif->ps_state_lock);
539 not_found = wfx_tx_queue_mask_get(wvif, &vif_queue,
540 &vif_tx_allowed_mask,
543 if (wvif->mcast_buffered && (not_found || !vif_more) &&
545 !wvif->sta_asleep_mask)) {
546 wvif->mcast_buffered = false;
547 if (wvif->mcast_tx) {
548 wvif->mcast_tx = false;
549 schedule_work(&wvif->mcast_stop_work);
553 spin_unlock_bh(&wvif->ps_state_lock);
557 tx_allowed_mask = vif_tx_allowed_mask;
561 } else if (!not_found) {
562 if (queue && queue != vif_queue)
563 dev_info(wdev->dev, "vifs disagree about queue priority\n");
564 tx_allowed_mask |= vif_tx_allowed_mask;
573 queue_num = queue - wdev->tx_queue;
575 skb = wfx_tx_queue_get(wdev, queue, tx_allowed_mask);
578 tx_priv = wfx_skb_tx_priv(skb);
579 hif = (struct hif_msg *) skb->data;
580 wvif = wdev_to_wvif(wdev, hif->interface);
583 if (hif_handle_tx_data(wvif, skb, queue))
584 continue; /* Handled by WSM */
586 /* allow bursting if txop is set */
587 if (wvif->edca_params[queue_num].txop)
588 burst = (int)wfx_tx_queue_get_num_queued(queue, tx_allowed_mask) + 1;
592 /* store index of bursting queue */
594 wdev->tx_burst_idx = queue_num;
596 wdev->tx_burst_idx = -1;
598 /* more buffered multicast/broadcast frames
599 * ==> set MoreData flag in IEEE 802.11 header
603 req = (struct hif_req_tx *) hif->body;
604 hdr = (struct ieee80211_hdr *) (req->frame + req->data_flags.fc_offset);
605 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);