// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator.
 *
 * Copyright (c) 2017-2020, Silicon Laboratories, Inc.
 * Copyright (c) 2010, ST-Ericsson
 */
#include <net/mac80211.h>
13 void wfx_tx_lock(struct wfx_dev *wdev)
15 atomic_inc(&wdev->tx_lock);
18 void wfx_tx_unlock(struct wfx_dev *wdev)
20 int tx_lock = atomic_dec_return(&wdev->tx_lock);
22 WARN(tx_lock < 0, "inconsistent tx_lock value");
24 wfx_bh_request_tx(wdev);
27 void wfx_tx_flush(struct wfx_dev *wdev)
31 // Do not wait for any reply if chip is frozen
32 if (wdev->chip_frozen)
36 mutex_lock(&wdev->hif_cmd.lock);
37 ret = wait_event_timeout(wdev->hif.tx_buffers_empty,
38 !wdev->hif.tx_buffers_used,
39 msecs_to_jiffies(3000));
41 dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
42 wdev->hif.tx_buffers_used);
43 wfx_pending_dump_old_frames(wdev, 3000);
44 // FIXME: drop pending frames here
45 wdev->chip_frozen = true;
47 mutex_unlock(&wdev->hif_cmd.lock);
// Convenience helper: stop TX dequeue, then drain the buffers already sent to
// the chip. NOTE(review): body was reconstructed from the upstream wfx driver
// (the paste lost these lines) — confirm against the original file.
void wfx_tx_lock_flush(struct wfx_dev *wdev)
{
	wfx_tx_lock(wdev);
	wfx_tx_flush(wdev);
}
57 void wfx_tx_queues_init(struct wfx_vif *wvif)
59 // The device is in charge to respect the details of the QoS parameters.
60 // The driver just ensure that it roughtly respect the priorities to
61 // avoid any shortage.
62 const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
65 for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
66 skb_queue_head_init(&wvif->tx_queue[i].normal);
67 skb_queue_head_init(&wvif->tx_queue[i].cab);
68 wvif->tx_queue[i].priority = priorities[i];
72 void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
76 for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
77 WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
78 WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].normal));
79 WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].cab));
83 bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
85 return skb_queue_empty(&queue->normal) && skb_queue_empty(&queue->cab);
88 static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
89 struct sk_buff_head *skb_queue,
90 struct sk_buff_head *dropped)
92 struct sk_buff *skb, *tmp;
94 spin_lock_bh(&skb_queue->lock);
95 skb_queue_walk_safe(skb_queue, skb, tmp) {
96 __skb_unlink(skb, skb_queue);
97 skb_queue_head(dropped, skb);
99 spin_unlock_bh(&skb_queue->lock);
102 void wfx_tx_queue_drop(struct wfx_vif *wvif, struct wfx_queue *queue,
103 struct sk_buff_head *dropped)
105 __wfx_tx_queue_drop(wvif, &queue->cab, dropped);
106 __wfx_tx_queue_drop(wvif, &queue->normal, dropped);
107 wake_up(&wvif->wdev->tx_dequeue);
110 void wfx_tx_queues_put(struct wfx_vif *wvif, struct sk_buff *skb)
112 struct wfx_queue *queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
113 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
115 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
116 skb_queue_tail(&queue->cab, skb);
118 skb_queue_tail(&queue->normal, skb);
121 void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
123 struct wfx_queue *queue;
124 struct wfx_vif *wvif;
128 WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
130 while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
131 hif = (struct hif_msg *)skb->data;
132 wvif = wdev_to_wvif(wdev, hif->interface);
134 queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
135 WARN_ON(skb_get_queue_mapping(skb) > 3);
136 WARN_ON(!atomic_read(&queue->pending_frames));
137 atomic_dec(&queue->pending_frames);
139 skb_queue_head(dropped, skb);
143 struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
145 struct wfx_queue *queue;
146 struct hif_req_tx *req;
147 struct wfx_vif *wvif;
151 spin_lock_bh(&wdev->tx_pending.lock);
152 skb_queue_walk(&wdev->tx_pending, skb) {
153 hif = (struct hif_msg *)skb->data;
154 req = (struct hif_req_tx *)hif->body;
155 if (req->packet_id != packet_id)
157 spin_unlock_bh(&wdev->tx_pending.lock);
158 wvif = wdev_to_wvif(wdev, hif->interface);
160 queue = &wvif->tx_queue[skb_get_queue_mapping(skb)];
161 WARN_ON(skb_get_queue_mapping(skb) > 3);
162 WARN_ON(!atomic_read(&queue->pending_frames));
163 atomic_dec(&queue->pending_frames);
165 skb_unlink(skb, &wdev->tx_pending);
168 spin_unlock_bh(&wdev->tx_pending.lock);
169 WARN(1, "cannot find packet in pending queue");
173 void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms)
175 ktime_t now = ktime_get();
176 struct wfx_tx_priv *tx_priv;
177 struct hif_req_tx *req;
181 spin_lock_bh(&wdev->tx_pending.lock);
182 skb_queue_walk(&wdev->tx_pending, skb) {
183 tx_priv = wfx_skb_tx_priv(skb);
184 req = wfx_skb_txreq(skb);
185 if (ktime_after(now, ktime_add_ms(tx_priv->xmit_timestamp,
188 dev_info(wdev->dev, "frames stuck in firmware since %dms or more:\n",
192 dev_info(wdev->dev, " id %08x sent %lldms ago\n",
194 ktime_ms_delta(now, tx_priv->xmit_timestamp));
197 spin_unlock_bh(&wdev->tx_pending.lock);
200 unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
203 ktime_t now = ktime_get();
204 struct wfx_tx_priv *tx_priv = wfx_skb_tx_priv(skb);
206 return ktime_us_delta(now, tx_priv->xmit_timestamp);
209 bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
213 if (wvif->vif->type != NL80211_IFTYPE_AP)
215 for (i = 0; i < IEEE80211_NUM_ACS; ++i)
216 // Note: since only AP can have mcast frames in queue and only
217 // one vif can be AP, all queued frames has same interface id
218 if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
223 static int wfx_tx_queue_get_weight(struct wfx_queue *queue)
225 return atomic_read(&queue->pending_frames) * queue->priority;
228 static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
230 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)];
231 int i, j, num_queues = 0;
232 struct wfx_vif *wvif;
238 while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
239 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
240 WARN_ON(num_queues >= ARRAY_SIZE(queues));
241 queues[num_queues] = &wvif->tx_queue[i];
242 for (j = num_queues; j > 0; j--)
243 if (wfx_tx_queue_get_weight(queues[j]) <
244 wfx_tx_queue_get_weight(queues[j - 1]))
245 swap(queues[j - 1], queues[j]);
251 while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
252 if (!wvif->after_dtim_tx_allowed)
254 for (i = 0; i < num_queues; i++) {
255 skb = skb_dequeue(&queues[i]->cab);
258 // Note: since only AP can have mcast frames in queue
259 // and only one vif can be AP, all queued frames has
261 hif = (struct hif_msg *)skb->data;
262 WARN_ON(hif->interface != wvif->id);
264 &wvif->tx_queue[skb_get_queue_mapping(skb)]);
265 atomic_inc(&queues[i]->pending_frames);
266 trace_queues_stats(wdev, queues[i]);
269 // No more multicast to sent
270 wvif->after_dtim_tx_allowed = false;
271 schedule_work(&wvif->update_tim_work);
274 for (i = 0; i < num_queues; i++) {
275 skb = skb_dequeue(&queues[i]->normal);
277 atomic_inc(&queues[i]->pending_frames);
278 trace_queues_stats(wdev, queues[i]);
285 struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
287 struct wfx_tx_priv *tx_priv;
290 if (atomic_read(&wdev->tx_lock))
292 skb = wfx_tx_queues_get_skb(wdev);
295 skb_queue_tail(&wdev->tx_pending, skb);
296 wake_up(&wdev->tx_dequeue);
297 tx_priv = wfx_skb_tx_priv(skb);
298 tx_priv->xmit_timestamp = ktime_get();
299 return (struct hif_msg *)skb->data;