// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

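/*
 * txwi (TX descriptor) buffer cache: buffers are allocated once, DMA-mapped
 * towards the device and then recycled through dev->txwi_cache instead of
 * being allocated and mapped again for every frame.
 */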
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
}

static enum mt76_txq_id
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

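/*
 * TX status tracking: frames that need a status report are kept on
 * dev->status_list, protected by status_list.lock, until the hardware
 * reports completion or the entry times out.
 */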
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                   __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
        __acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                     __releases(&dev->status_list.lock)
{
        struct ieee80211_hw *hw;
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);
        __release(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_tx_status(hw, skb);
        }
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

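/*
 * A tracked frame is only completed once both the DMA-done and the TX-status
 * events have been seen; the flags argument names the event being reported
 * here, the rest is accumulated in the skb's control block.
 */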
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable; if it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

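/*
 * Assign a packet ID to a frame and add it to the status list. Returns
 * MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB when no status tracking is
 * needed, otherwise the allocated packet ID.
 */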
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

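/*
 * Look up a tracked frame by wcid and packet ID. Entries older than
 * MT_TX_STATUS_SKB_TIMEOUT are completed as failed along the way; a negative
 * pktid (as used by mt76_tx_status_check() on flush) expires every entry.
 */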
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
                                              MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

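/*
 * DMA completion handler for a transmitted frame: frames that were never put
 * on the status list are freed right away, tracked frames are only released
 * once the matching TX status report has arrived as well.
 */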
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct ieee80211_hw *hw;
        struct sk_buff_head list;

        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

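/*
 * Main TX entry point for frames handed over by mac80211: fill in rate
 * control info, track the aggregation SSN for QoS data, queue the frame on
 * the per-AC hardware queue and stop the mac80211 queue when the hardware
 * ring is almost full.
 */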
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);
        bool ext_phy = phy != &dev->phy;

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
                struct ieee80211_txq *txq;
                struct mt76_txq *mtxq;
                u8 tid;

                tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
                txq = sta->txq[tid];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                if (mtxq->aggr)
                        mt76_check_agg_ssn(mtxq, skb);
        }

        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        q = dev->q_tx[qid].q;

        spin_lock_bh(&q->lock);
        dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(phy->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

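/*
 * Dequeue the next frame for a TXQ, preferring the driver-local retry queue
 * over mac80211's queue, and flag frames destined for the second PHY.
 */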
static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct ieee80211_tx_info *info;
        bool ext_phy = phy != &phy->dev->phy;
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        skb = ieee80211_tx_dequeue(phy->hw, txq);
        if (!skb)
                return NULL;

        info = IEEE80211_SKB_CB(skb);
        if (ext_phy)
                info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

        return skb;
}

static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

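/*
 * mac80211 power-save callback: release up to nframes buffered frames for
 * the requested TIDs, setting the "more data" bit on all but the last frame
 * and EOSP on the last one.
 */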
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

                if (!(tids & 1))
                        continue;

                do {
                        struct sk_buff *skb;

                        skb = mt76_txq_dequeue(phy, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

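/*
 * Push a burst of frames from one TXQ to the hardware queue: up to 16 frames
 * when the first frame is part of an A-MPDU, otherwise up to 3. Rate-control
 * probe frames are always sent on their own.
 */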
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
                    struct mt76_txq *mtxq)
{
        struct mt76_dev *dev = phy->dev;
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                return 0;

        skb = mt76_txq_dequeue(phy, mtxq, false);
        if (!skb)
                return 0;

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_RESET, &dev->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(phy, mtxq, false);
                if (!skb)
                        break;

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
                                                   txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->entry[idx].qid = sq - dev->q_tx;
                hwq->entry[idx].schedule = true;
                sq->swq_queued++;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

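/*
 * One scheduling round over mac80211's TXQ list for a single AC: send a
 * pending BAR if needed, then burst frames per TXQ until the software queue
 * limit is reached.
 */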
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_dev *dev = phy->dev;
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&hwq->lock);
        while (1) {
                if (sq->swq_queued >= 4)
                        break;

                if (test_bit(MT76_RESET, &dev->state)) {
                        ret = -EBUSY;
                        break;
                }

                txq = ieee80211_next_txq(phy->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                }

                ret += mt76_txq_send_burst(phy, sq, mtxq);
                ieee80211_return_txq(phy->hw, txq,
                                     !skb_queue_empty(&mtxq->retry_q));
        }
        spin_unlock_bh(&hwq->lock);

        return ret;
}

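/* Run scheduling rounds for one AC until no more frames are pending. */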
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
        struct mt76_dev *dev = phy->dev;
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        int len;

        if (qid >= 4)
                return;

        if (sq->swq_queued >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(phy->hw, qid);
                len = mt76_txq_schedule_list(phy, qid);
                ieee80211_txq_schedule_end(phy->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;

        mt76_txq_schedule_all(&dev->phy);
        if (dev->phy2)
                mt76_txq_schedule_all(dev->phy2);
}

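/*
 * Flag a station's TXQs so that, where aggregation is active and send_bar is
 * set, a BAR is sent the next time the queue is scheduled.
 */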
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                hwq = mtxq->swq->q;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_phy *phy = hw->priv;
        struct mt76_dev *dev = phy->dev;

        if (!test_bit(MT76_STATE_RUNNING, &dev->state))
                return;

        tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct ieee80211_hw *hw;
        struct mt76_txq *mtxq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *)txq->drv_priv;

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
        }
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

        skb_queue_head_init(&mtxq->retry_q);

        mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

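/*
 * Map a mac80211 access category to the WMM hardware queue index
 * (BE=0, BK=1, VI=2, VO=3).
 */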
u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);