1 // SPDX-License-Identifier: ISC
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
5 #include <linux/sched.h>
/*
 * Channel-table initializer macros for ieee80211_channel entries.
 * NOTE(review): this listing is elided (embedded line numbers jump); the
 * macros' remaining fields and closing braces are not visible here.
 * CHAN2G/CHAN5G set the band and center frequency for a 2.4 GHz or 5 GHz
 * channel entry; _idx presumably feeds a hardware channel index field in
 * the elided part — TODO confirm against the full source.
 */
9 #define CHAN2G(_idx, _freq) { \
10 .band = NL80211_BAND_2GHZ, \
11 .center_freq = (_freq), \
16 #define CHAN5G(_idx, _freq) { \
17 .band = NL80211_BAND_5GHZ, \
18 .center_freq = (_freq), \
/*
 * Static channel lists for the two bands; the individual channel entries
 * are elided from this listing.
 */
23 static const struct ieee80211_channel mt76_channels_2ghz[] = {
40 static const struct ieee80211_channel mt76_channels_5ghz[] = {
/*
 * Throughput-to-blink-rate map for the mac80211 throughput LED trigger:
 * .throughput is in units of 1024 (presumably kbps — TODO confirm against
 * ieee80211_tpt_blink users), .blink_time in ms.  Blink interval shortens
 * monotonically as throughput rises.
 */
73 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
74 { .throughput = 0 * 1024, .blink_time = 334 },
75 { .throughput = 1 * 1024, .blink_time = 260 },
76 { .throughput = 5 * 1024, .blink_time = 220 },
77 { .throughput = 10 * 1024, .blink_time = 190 },
78 { .throughput = 20 * 1024, .blink_time = 170 },
79 { .throughput = 50 * 1024, .blink_time = 150 },
80 { .throughput = 70 * 1024, .blink_time = 130 },
81 { .throughput = 100 * 1024, .blink_time = 110 },
82 { .throughput = 200 * 1024, .blink_time = 80 },
83 { .throughput = 300 * 1024, .blink_time = 50 },
/*
 * mt76_led_init() - register the driver LED with the LED class subsystem.
 * Skips registration when the driver supplied neither a brightness_set nor
 * a blink_set callback.  Names the LED "mt76-<wiphy name>", attaches the
 * mac80211 throughput trigger built from mt76_tpt_blink, and reads optional
 * device-tree properties from a "led" child node: "led-sources" (LED pin)
 * and "led-active-low" (polarity).  Listing is elided — local declarations
 * (e.g. led_pin) and some intermediate lines are not visible.
 */
86 static int mt76_led_init(struct mt76_dev *dev)
88 struct device_node *np = dev->dev->of_node;
89 struct ieee80211_hw *hw = dev->hw;
/* No LED callbacks registered by the driver: nothing to expose. */
92 if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
95 snprintf(dev->led_name, sizeof(dev->led_name),
96 "mt76-%s", wiphy_name(hw->wiphy));
98 dev->led_cdev.name = dev->led_name;
99 dev->led_cdev.default_trigger =
100 ieee80211_create_tpt_led_trigger(hw,
101 IEEE80211_TPT_LEDTRIG_FL_RADIO,
103 ARRAY_SIZE(mt76_tpt_blink));
/* Optional DT configuration under the "led" child node. */
105 np = of_get_child_by_name(np, "led");
107 if (!of_property_read_u32(np, "led-sources", &led_pin))
108 dev->led_pin = led_pin;
109 dev->led_al = of_property_read_bool(np, "led-active-low");
112 return led_classdev_register(dev->dev, &dev->led_cdev);
/*
 * mt76_led_cleanup() - unregister the LED; mirrors the registration guard
 * so it is a no-op when mt76_led_init() skipped registration.
 */
115 static void mt76_led_cleanup(struct mt76_dev *dev)
117 if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
120 led_classdev_unregister(&dev->led_cdev);
/*
 * mt76_init_stream_cap() - derive HT/VHT spatial-stream capabilities from
 * the phy antenna mask.  nstream is the popcount of phy->antenna_mask.
 * TX STBC is advertised only in one of the two branches (the controlling
 * condition is elided — presumably nstream > 1; TODO confirm).  The HT MCS
 * rx_mask enables one 0xff byte per supported stream; the VHT MCS map marks
 * each stream as MCS0-9 capable or NOT_SUPPORTED (the per-stream condition
 * line is elided).
 */
123 static void mt76_init_stream_cap(struct mt76_phy *phy,
124 struct ieee80211_supported_band *sband,
127 struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
128 int i, nstream = hweight8(phy->antenna_mask);
129 struct ieee80211_sta_vht_cap *vht_cap;
133 ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
135 ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
/* One full MCS byte (8 MCS indexes) per supported RX stream. */
137 for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
138 ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;
143 vht_cap = &sband->vht_cap;
145 vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
147 vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
/* Build the 16-bit VHT MCS map, two bits per stream (8 streams max). */
149 for (i = 0; i < 8; i++) {
151 mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
154 (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
/* Same map is advertised for both RX and TX directions. */
156 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
157 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
/*
 * mt76_set_stream_caps() - refresh stream caps on both bands; VHT applies
 * only to the 5 GHz sband (2 GHz is always initialized with vht=false).
 */
160 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
162 if (phy->cap.has_2ghz)
163 mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
164 if (phy->cap.has_5ghz)
165 mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
167 EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
/*
 * mt76_init_sband() - populate one ieee80211_supported_band: duplicate the
 * static channel template into device-managed memory, allocate per-channel
 * state, wire up bitrates, and set default HT (and optionally VHT)
 * capabilities before delegating per-stream caps to mt76_init_stream_cap().
 * Listing is elided: error checks after the devm allocations and the early
 * return for the non-VHT case are not visible.  Returns 0 on success
 * (return statements elided — TODO confirm).
 */
170 mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
171 const struct ieee80211_channel *chan, int n_chan,
172 struct ieee80211_rate *rates, int n_rates, bool vht)
174 struct ieee80211_supported_band *sband = &msband->sband;
175 struct ieee80211_sta_vht_cap *vht_cap;
176 struct ieee80211_sta_ht_cap *ht_cap;
177 struct mt76_dev *dev = phy->dev;
/* Device-managed copy of the const channel template (writable per-device). */
181 size = n_chan * sizeof(*chan);
182 chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
186 msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
191 sband->channels = chanlist;
192 sband->n_channels = n_chan;
193 sband->bitrates = rates;
194 sband->n_bitrates = n_rates;
/* Baseline HT caps: 40 MHz, greenfield, SGI 20/40, 1-stream RX STBC. */
196 ht_cap = &sband->ht_cap;
197 ht_cap->ht_supported = true;
198 ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
199 IEEE80211_HT_CAP_GRN_FLD |
200 IEEE80211_HT_CAP_SGI_20 |
201 IEEE80211_HT_CAP_SGI_40 |
202 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
204 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
205 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
207 mt76_init_stream_cap(phy, sband, vht);
/* Baseline VHT caps (applied only when vht — guard line elided). */
212 vht_cap = &sband->vht_cap;
213 vht_cap->vht_supported = true;
214 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
215 IEEE80211_VHT_CAP_RXSTBC_1 |
216 IEEE80211_VHT_CAP_SHORT_GI_80 |
217 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
218 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
219 (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
/*
 * Band-specific wrappers: register the sband with the wiphy, then build it
 * from the matching static channel table.  2 GHz never advertises VHT.
 */
225 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
228 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
230 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
231 ARRAY_SIZE(mt76_channels_2ghz), rates,
236 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
237 int n_rates, bool vht)
239 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
241 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
242 ARRAY_SIZE(mt76_channels_5ghz), rates,
/*
 * mt76_check_sband() - after regulatory/OF limits are applied, check whether
 * the band still has any enabled channel.  If one is found (loop body and
 * "found" bookkeeping elided), the phy's default chandef/chan_state point at
 * the band's first channel; otherwise the band is disabled by zeroing
 * n_channels and clearing the wiphy band pointer.
 */
247 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
248 enum nl80211_band band)
250 struct ieee80211_supported_band *sband = &msband->sband;
/* Scan for at least one channel not flagged IEEE80211_CHAN_DISABLED. */
257 for (i = 0; i < sband->n_channels; i++) {
258 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
266 phy->chandef.chan = &sband->channels[0];
267 phy->chan_state = &msband->chan[0];
/* No usable channel: hide the whole band from mac80211. */
271 sband->n_channels = 0;
272 phy->hw->wiphy->bands[band] = NULL;
/*
 * mt76_phy_init() - common ieee80211_hw/wiphy setup shared by the primary
 * device and secondary phys: device/MAC address binding, wiphy feature and
 * extended-feature flags, antenna masks, txq sizing, hardware capability
 * flags, and the supported interface-mode bitmap.  Called before
 * ieee80211_register_hw().
 */
276 mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
278 struct mt76_dev *dev = phy->dev;
279 struct wiphy *wiphy = hw->wiphy;
281 SET_IEEE80211_DEV(hw, dev->dev);
282 SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);
284 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
285 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
286 WIPHY_FLAG_SUPPORTS_TDLS |
289 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
290 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
291 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);
/* Antenna masks come from the primary phy (dev->phy), not the argument. */
293 wiphy->available_antennas_tx = dev->phy.antenna_mask;
294 wiphy->available_antennas_rx = dev->phy.antenna_mask;
296 hw->txq_data_size = sizeof(struct mt76_txq);
297 hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
/* Driver may have set its own fragment limit; default to 16 otherwise. */
299 if (!hw->max_tx_fragments)
300 hw->max_tx_fragments = 16;
302 ieee80211_hw_set(hw, SIGNAL_DBM);
303 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
304 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
305 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
306 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
307 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
308 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
/* Software A-MSDU aggregation only when the driver does not offload it. */
310 if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
311 ieee80211_hw_set(hw, TX_AMSDU);
312 ieee80211_hw_set(hw, TX_FRAG_LIST);
315 ieee80211_hw_set(hw, MFP_CAPABLE);
316 ieee80211_hw_set(hw, AP_LINK_PS);
317 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
319 wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
/* Mesh support compiled in only with CONFIG_MAC80211_MESH. */
320 wiphy->interface_modes =
321 BIT(NL80211_IFTYPE_STATION) |
322 BIT(NL80211_IFTYPE_AP) |
323 #ifdef CONFIG_MAC80211_MESH
324 BIT(NL80211_IFTYPE_MESH_POINT) |
326 BIT(NL80211_IFTYPE_P2P_CLIENT) |
327 BIT(NL80211_IFTYPE_P2P_GO) |
328 BIT(NL80211_IFTYPE_ADHOC);
/*
 * mt76_alloc_phy() - allocate an ieee80211_hw for a secondary phy.  The hw
 * private area holds the mt76_phy followed (8-byte aligned) by the driver's
 * own private data of the requested size; phy->priv points past the phy.
 * Error handling and the phy field initialization between the allocation
 * and the priv assignment are elided in this listing.
 */
332 mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
333 const struct ieee80211_ops *ops)
335 struct ieee80211_hw *hw;
336 unsigned int phy_size;
337 struct mt76_phy *phy;
339 phy_size = ALIGN(sizeof(*phy), 8);
340 hw = ieee80211_alloc_hw(size + phy_size, ops);
347 phy->priv = hw->priv + phy_size;
351 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
/*
 * mt76_register_phy() - initialize and register a secondary phy with
 * mac80211: common hw setup, per-band sband init (5 GHz skips the first 4
 * rates, i.e. the CCK entries — rates + 4 / n_rates - 4), OF frequency
 * limits, band pruning, then ieee80211_register_hw().  On success the
 * device's phy2 pointer is set to this phy.  Error paths are elided.
 */
353 int mt76_register_phy(struct mt76_phy *phy, bool vht,
354 struct ieee80211_rate *rates, int n_rates)
358 mt76_phy_init(phy, phy->hw);
360 if (phy->cap.has_2ghz) {
361 ret = mt76_init_sband_2g(phy, rates, n_rates);
366 if (phy->cap.has_5ghz) {
367 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
372 wiphy_read_of_freq_limits(phy->hw->wiphy);
373 mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
374 mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
376 ret = ieee80211_register_hw(phy->hw);
380 phy->dev->phy2 = phy;
384 EXPORT_SYMBOL_GPL(mt76_register_phy);
/*
 * mt76_unregister_phy() - flush pending tx status work, then unregister the
 * secondary phy's hw (the phy2 pointer clear is elided from this listing).
 */
386 void mt76_unregister_phy(struct mt76_phy *phy)
388 struct mt76_dev *dev = phy->dev;
390 mt76_tx_status_check(dev, NULL, true);
391 ieee80211_unregister_hw(phy->hw);
394 EXPORT_SYMBOL_GPL(mt76_unregister_phy);
/*
 * mt76_alloc_device() - allocate the primary mt76_dev/ieee80211_hw pair and
 * initialize all core infrastructure: locks, wait queues, MCU response
 * queue, tx worker, token IDR, txwi cache, per-queue rx skb lists, and the
 * ordered "mt76" workqueue.  On workqueue allocation failure the hw is
 * freed (the accompanying NULL return is elided).  Several assignments
 * between allocation and lock init are elided in this listing.
 */
397 mt76_alloc_device(struct device *pdev, unsigned int size,
398 const struct ieee80211_ops *ops,
399 const struct mt76_driver_ops *drv_ops)
401 struct ieee80211_hw *hw;
402 struct mt76_phy *phy;
403 struct mt76_dev *dev;
406 hw = ieee80211_alloc_hw(size, ops);
419 spin_lock_init(&dev->rx_lock);
420 spin_lock_init(&dev->lock);
421 spin_lock_init(&dev->cc_lock);
422 mutex_init(&dev->mutex);
423 init_waitqueue_head(&dev->tx_wait);
424 skb_queue_head_init(&dev->status_list);
426 skb_queue_head_init(&dev->mcu.res_q);
427 init_waitqueue_head(&dev->mcu.wait);
428 mutex_init(&dev->mcu.mutex);
429 dev->tx_worker.fn = mt76_tx_worker;
431 spin_lock_init(&dev->token_lock);
432 idr_init(&dev->token);
434 INIT_LIST_HEAD(&dev->txwi_cache);
/* One pending-skb list per hardware RX queue. */
436 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
437 skb_queue_head_init(&dev->rx_skb[i]);
439 dev->wq = alloc_ordered_workqueue("mt76", 0);
441 ieee80211_free_hw(hw);
447 EXPORT_SYMBOL_GPL(mt76_alloc_device);
/*
 * mt76_register_device() - register the primary phy: drvdata binding,
 * common hw init, per-band sbands (5 GHz skips the 4 CCK rate entries),
 * OF frequency limits, band pruning, optional LED registration, mac80211
 * registration, then start the tx worker at low FIFO priority.  Error
 * paths after each step are elided.
 */
449 int mt76_register_device(struct mt76_dev *dev, bool vht,
450 struct ieee80211_rate *rates, int n_rates)
452 struct ieee80211_hw *hw = dev->hw;
453 struct mt76_phy *phy = &dev->phy;
456 dev_set_drvdata(dev->dev, dev);
457 mt76_phy_init(phy, hw);
459 if (phy->cap.has_2ghz) {
460 ret = mt76_init_sband_2g(phy, rates, n_rates);
465 if (phy->cap.has_5ghz) {
466 ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
471 wiphy_read_of_freq_limits(hw->wiphy);
472 mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
473 mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
/* LEDs are optional; only attempted when CONFIG_MT76_LEDS is enabled. */
475 if (IS_ENABLED(CONFIG_MT76_LEDS)) {
476 ret = mt76_led_init(dev);
481 ret = ieee80211_register_hw(hw);
485 WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
486 sched_set_fifo_low(dev->tx_worker.task);
490 EXPORT_SYMBOL_GPL(mt76_register_device);
/*
 * mt76_unregister_device() - reverse of registration: LED cleanup, flush
 * pending tx status, unregister from mac80211.
 */
492 void mt76_unregister_device(struct mt76_dev *dev)
494 struct ieee80211_hw *hw = dev->hw;
496 if (IS_ENABLED(CONFIG_MT76_LEDS))
497 mt76_led_cleanup(dev);
498 mt76_tx_status_check(dev, NULL, true);
499 ieee80211_unregister_hw(hw);
501 EXPORT_SYMBOL_GPL(mt76_unregister_device);
/*
 * mt76_free_device() - final teardown after unregistration: stop the tx
 * worker, destroy the ordered workqueue (a NULL-ing of dev->wq is elided),
 * and release the ieee80211_hw (which owns the mt76_dev allocation).
 */
503 void mt76_free_device(struct mt76_dev *dev)
505 mt76_worker_teardown(&dev->tx_worker);
507 destroy_workqueue(dev->wq);
510 ieee80211_free_hw(dev->hw);
512 EXPORT_SYMBOL_GPL(mt76_free_device);
/*
 * mt76_rx_release_amsdu() - hand the accumulated A-MSDU burst for queue q
 * to the per-queue rx skb list.  Before release it validates the first
 * subframe: a plain MSDU can masquerade as an A-MSDU if the (unprotected)
 * A-MSDU QoS bit was flipped by an attacker, in which case the first
 * subframe carries an LLC/SNAP header where the destination address should
 * be.  The drop path taken on a detected mismatch is elided.
 */
514 static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
516 struct sk_buff *skb = phy->rx_amsdu[q].head;
517 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
518 struct mt76_dev *dev = phy->dev;
/* Detach the burst from the accumulator before queuing it. */
520 phy->rx_amsdu[q].head = NULL;
521 phy->rx_amsdu[q].tail = NULL;
524 * Validate if the amsdu has a proper first subframe.
525 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
526 * flag of the QoS header gets flipped. In such cases, the first
527 * subframe has a LLC/SNAP header in the location of the destination
/* Only multi-subframe bursts (with a frag_list) need the check. */
530 if (skb_shinfo(skb)->frag_list) {
533 if (!(status->flag & RX_FLAG_8023)) {
534 offset = ieee80211_get_hdrlen_from_skb(skb);
537 (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
/* rfc1042 header where the DA should be => spoofed A-MSDU. */
542 if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
547 __skb_queue_tail(&dev->rx_skb[q], skb);
/*
 * mt76_rx_release_burst() - A-MSDU reassembly state machine.  Flushes the
 * current burst when the incoming frame starts a new one (not an A-MSDU
 * subframe, is a first subframe, or has a different sequence number), then
 * either starts a new burst with this skb as head or appends it to the
 * frag_list via the tail pointer.  The burst is flushed immediately for
 * non-A-MSDU frames and on the last subframe.
 */
550 static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
553 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
555 if (phy->rx_amsdu[q].head &&
556 (!status->amsdu || status->first_amsdu ||
557 status->seqno != phy->rx_amsdu[q].seqno))
558 mt76_rx_release_amsdu(phy, q);
560 if (!phy->rx_amsdu[q].head) {
/* New burst: subsequent subframes chain into this skb's frag_list. */
561 phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
562 phy->rx_amsdu[q].seqno = status->seqno;
563 phy->rx_amsdu[q].head = skb;
565 *phy->rx_amsdu[q].tail = skb;
566 phy->rx_amsdu[q].tail = &skb->next;
569 if (!status->amsdu || status->last_amsdu)
570 mt76_rx_release_amsdu(phy, q);
/*
 * mt76_rx() - driver entry point for a received frame.  Drops frames while
 * the phy is not running (drop path elided), updates testmode RX counters
 * when applicable, then feeds the frame through the A-MSDU burst logic.
 */
573 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
575 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
576 struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);
578 if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
583 #ifdef CONFIG_NL80211_TESTMODE
584 if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
585 phy->test.rx_stats.packets[q]++;
586 if (status->flag & RX_FLAG_FAILED_FCS_CRC)
587 phy->test.rx_stats.fcs_error[q]++;
591 mt76_rx_release_burst(phy, q, skb);
593 EXPORT_SYMBOL_GPL(mt76_rx);
/*
 * mt76_has_tx_pending() - report whether any TX queue of this phy still
 * holds queued frames (per-queue check inside the loop is elided).
 */
595 bool mt76_has_tx_pending(struct mt76_phy *phy)
597 struct mt76_queue *q;
600 for (i = 0; i < __MT_TXQ_MAX; i++) {
608 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
/*
 * mt76_channel_state() - map an ieee80211_channel back to its per-channel
 * state.  Relies on the channel pointer belonging to one of the phy's two
 * sband channel arrays; the index is recovered by pointer arithmetic.
 */
610 static struct mt76_channel_state *
611 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
613 struct mt76_sband *msband;
616 if (c->band == NL80211_BAND_2GHZ)
617 msband = &phy->sband_2g;
619 msband = &phy->sband_5g;
621 idx = c - &msband->sband.channels[0];
622 return &msband->chan[idx];
/*
 * mt76_update_survey_active_time() - credit the time elapsed since the last
 * survey snapshot to the current channel's active counter (in µs) and move
 * the snapshot forward to 'time'.
 */
625 void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
627 struct mt76_channel_state *state = phy->chan_state;
629 state->cc_active += ktime_to_us(ktime_sub(time,
631 phy->survey_time = time;
633 EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
/*
 * mt76_update_survey() - refresh survey counters: let the driver update its
 * hardware counters, advance active time for the primary phy (and phy2 —
 * its NULL guard is elided), and for software RX-airtime drivers fold the
 * accumulated BSS RX airtime into channel state under cc_lock.
 */
635 void mt76_update_survey(struct mt76_dev *dev)
639 if (dev->drv->update_survey)
640 dev->drv->update_survey(dev);
642 cur_time = ktime_get_boottime();
643 mt76_update_survey_active_time(&dev->phy, cur_time);
645 mt76_update_survey_active_time(dev->phy2, cur_time);
647 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
648 struct mt76_channel_state *state = dev->phy.chan_state;
650 spin_lock_bh(&dev->cc_lock);
651 state->cc_bss_rx += dev->cur_cc_bss_rx;
652 dev->cur_cc_bss_rx = 0;
653 spin_unlock_bh(&dev->cc_lock);
656 EXPORT_SYMBOL_GPL(mt76_update_survey);
/*
 * mt76_set_channel() - switch the phy to the channel currently configured
 * in hw->conf.  Waits up to HZ/5 for pending TX to drain, closes out the
 * survey accounting for the old channel, then installs the new
 * chandef/chan_state.  main_chan tracks the operating channel: it is only
 * updated for non-offchannel tunes (the guard using 'offchannel' is
 * elided), and channel state is reset when tuning away from it.
 */
658 void mt76_set_channel(struct mt76_phy *phy)
660 struct mt76_dev *dev = phy->dev;
661 struct ieee80211_hw *hw = phy->hw;
662 struct cfg80211_chan_def *chandef = &hw->conf.chandef;
663 bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
664 int timeout = HZ / 5;
/* Drain TX before retuning so in-flight frames are not lost mid-switch. */
666 wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
667 mt76_update_survey(dev);
669 phy->chandef = *chandef;
670 phy->chan_state = mt76_channel_state(phy, chandef->chan);
673 phy->main_chan = chandef->chan;
/* Off-channel visit: discard stale counters for the temporary channel. */
675 if (chandef->chan != phy->main_chan)
676 memset(phy->chan_state, 0, sizeof(*phy->chan_state));
678 EXPORT_SYMBOL_GPL(mt76_set_channel);
/*
 * mt76_get_survey() - mac80211 .get_survey callback.  idx indexes channels
 * across both bands: 2 GHz first, then 5 GHz.  Refreshes counters on the
 * first call (idx == 0), converts the per-channel cc_* microsecond counters
 * to milliseconds, and reports extra fields (IN_USE, BSS RX time) for the
 * operating channel.  The out-of-range -ENOENT path and the final return
 * are elided from this listing.
 */
680 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
681 struct survey_info *survey)
683 struct mt76_phy *phy = hw->priv;
684 struct mt76_dev *dev = phy->dev;
685 struct mt76_sband *sband;
686 struct ieee80211_channel *chan;
687 struct mt76_channel_state *state;
690 mutex_lock(&dev->mutex);
691 if (idx == 0 && dev->drv->update_survey)
692 mt76_update_survey(dev);
/* Fold the linear index into the right band. */
694 sband = &phy->sband_2g;
695 if (idx >= sband->sband.n_channels) {
696 idx -= sband->sband.n_channels;
697 sband = &phy->sband_5g;
700 if (idx >= sband->sband.n_channels) {
705 chan = &sband->sband.channels[idx];
706 state = mt76_channel_state(phy, chan);
708 memset(survey, 0, sizeof(*survey));
709 survey->channel = chan;
710 survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
711 survey->filled |= dev->drv->survey_flags;
712 survey->filled |= SURVEY_INFO_NOISE_DBM;
/* Extra detail only for the channel we are actually operating on. */
715 if (chan == phy->main_chan) {
716 survey->filled |= SURVEY_INFO_IN_USE;
718 if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
719 survey->filled |= SURVEY_INFO_TIME_BSS_RX;
/* cc_* counters are in µs; survey_info times are in ms. */
722 survey->time_busy = div_u64(state->cc_busy, 1000);
723 survey->time_rx = div_u64(state->cc_rx, 1000);
724 survey->time = div_u64(state->cc_active, 1000);
725 survey->noise = state->noise;
/* cc_bss_rx/cc_tx are updated under cc_lock elsewhere; read consistently. */
727 spin_lock_bh(&dev->cc_lock);
728 survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
729 survey->time_tx = div_u64(state->cc_tx, 1000);
730 spin_unlock_bh(&dev->cc_lock);
733 mutex_unlock(&dev->mutex);
737 EXPORT_SYMBOL_GPL(mt76_get_survey);
/*
 * mt76_wcid_key_setup() - enable software CCMP PN replay checking for a
 * station entry.  Checking is disabled by default (and, per the visible
 * early-return guard, for any cipher other than CCMP); for CCMP the
 * current RX PN of every TID is snapshotted into wcid->rx_key_pn so later
 * frames can be compared against it.  A NULL-key guard between the reset
 * and the cipher check is elided.
 */
739 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
740 struct ieee80211_key_conf *key)
742 struct ieee80211_key_seq seq;
745 wcid->rx_check_pn = false;
750 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
753 wcid->rx_check_pn = true;
754 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
755 ieee80211_get_key_rx_seq(key, i, &seq);
756 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
759 EXPORT_SYMBOL(mt76_wcid_key_setup);
/*
 * mt76_rx_convert() - convert the driver-private mt76_rx_status stored in
 * skb->cb into mac80211's ieee80211_rx_status (which shares the same cb
 * space — hence the copy into a local 'mstat' before the memset).  Also
 * resolves the owning sta and the hw for the phy the frame arrived on.
 * The BUILD_BUG_ONs pin the cb-size and chain_signal layout assumptions.
 */
762 mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
763 struct ieee80211_hw **hw,
764 struct ieee80211_sta **sta)
766 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
767 struct mt76_rx_status mstat;
/* Copy out first: status and mstat alias the same skb->cb bytes. */
769 mstat = *((struct mt76_rx_status *)skb->cb);
770 memset(status, 0, sizeof(*status));
772 status->flag = mstat.flag;
773 status->freq = mstat.freq;
774 status->enc_flags = mstat.enc_flags;
775 status->encoding = mstat.encoding;
776 status->bw = mstat.bw;
777 status->he_ru = mstat.he_ru;
778 status->he_gi = mstat.he_gi;
779 status->he_dcm = mstat.he_dcm;
780 status->rate_idx = mstat.rate_idx;
781 status->nss = mstat.nss;
782 status->band = mstat.band;
783 status->signal = mstat.signal;
784 status->chains = mstat.chains;
785 status->ampdu_reference = mstat.ampdu_ref;
786 status->device_timestamp = mstat.timestamp;
787 status->mactime = mstat.timestamp;
789 BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
790 BUILD_BUG_ON(sizeof(status->chain_signal) !=
791 sizeof(mstat.chain_signal));
792 memcpy(status->chain_signal, mstat.chain_signal,
793 sizeof(mstat.chain_signal));
795 *sta = wcid_to_sta(mstat.wcid);
796 *hw = mt76_phy_hw(dev, mstat.ext_phy);
800 mt76_check_ccmp_pn(struct sk_buff *skb)
802 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
803 struct mt76_wcid *wcid = status->wcid;
804 struct ieee80211_hdr *hdr;
805 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
808 if (!(status->flag & RX_FLAG_DECRYPTED))
811 if (!wcid || !wcid->rx_check_pn)
814 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
816 * Validate the first fragment both here and in mac80211
817 * All further fragments will be validated by mac80211 only.
819 hdr = mt76_skb_get_hdr(skb);
820 if (ieee80211_is_frag(hdr) &&
821 !ieee80211_is_first_frag(hdr->frame_control))
825 BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
826 ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
829 return -EINVAL; /* replay */
831 memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));
833 if (status->flag & RX_FLAG_IV_STRIPPED)
834 status->flag |= RX_FLAG_PN_VALIDATED;
/*
 * mt76_airtime_report() - compute RX airtime for 'len' bytes at the rate
 * described by 'status' (via mac80211's airtime calculator), credit it to
 * the device's BSS RX counter under cc_lock, and — when the frame belongs
 * to a known station — report it to mac80211's airtime fairness accounting.
 */
840 mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
843 struct mt76_wcid *wcid = status->wcid;
844 struct ieee80211_rx_status info = {
845 .enc_flags = status->enc_flags,
846 .rate_idx = status->rate_idx,
847 .encoding = status->encoding,
848 .band = status->band,
852 struct ieee80211_sta *sta;
854 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
856 airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
857 spin_lock(&dev->cc_lock);
858 dev->cur_cc_bss_rx += airtime;
859 spin_unlock(&dev->cc_lock);
861 if (!wcid || !wcid->sta)
/* wcid is embedded in the sta's drv_priv area; recover the sta. */
864 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
865 ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
/*
 * mt76_airtime_flush_ampdu() - report the airtime accumulated for the
 * current A-MPDU as one unit, attributed to the wcid captured when the
 * A-MPDU started (looked up by index under RCU — rcu_read_lock/unlock
 * lines are elided), then reset the accumulator.
 */
869 mt76_airtime_flush_ampdu(struct mt76_dev *dev)
871 struct mt76_wcid *wcid;
874 if (!dev->rx_ampdu_len)
877 wcid_idx = dev->rx_ampdu_status.wcid_idx;
878 if (wcid_idx < ARRAY_SIZE(dev->wcid))
879 wcid = rcu_dereference(dev->wcid[wcid_idx]);
882 dev->rx_ampdu_status.wcid = wcid;
884 mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);
886 dev->rx_ampdu_len = 0;
887 dev->rx_ampdu_ref = 0;
/*
 * mt76_airtime_check() - per-frame software RX airtime accounting entry
 * point (only for drivers flagged MT_DRV_SW_RX_AIRTIME).  Frames from
 * unknown stations are only counted when addressed to us (non-802.3,
 * addr1 matches our MAC).  A-MPDU subframes are accumulated by ampdu_ref
 * and flushed as a batch; standalone frames are reported immediately.
 */
891 mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
893 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
894 struct mt76_wcid *wcid = status->wcid;
896 if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
899 if (!wcid || !wcid->sta) {
900 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
/* 802.3-decapped frames have no usable 802.11 header here. */
902 if (status->flag & RX_FLAG_8023)
905 if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
/* New A-MPDU (or non-A-MPDU frame): close out the previous batch. */
911 if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
912 status->ampdu_ref != dev->rx_ampdu_ref)
913 mt76_airtime_flush_ampdu(dev);
915 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
916 if (!dev->rx_ampdu_len ||
917 status->ampdu_ref != dev->rx_ampdu_ref) {
918 dev->rx_ampdu_status = *status;
919 dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
920 dev->rx_ampdu_ref = status->ampdu_ref;
923 dev->rx_ampdu_len += skb->len;
927 mt76_airtime_report(dev, status, skb->len);
/*
 * mt76_check_sta() - per-frame station bookkeeping on the RX path:
 * resolves the wcid for PS-poll frames from stations we have not matched
 * yet, runs airtime accounting, updates the RSSI EWMA and inactivity
 * counter, and — for stations flagged for software PS tracking — mirrors
 * the frame's power-management state into mac80211 (PS-poll, U-APSD
 * trigger, PS transitions).  Several early returns are elided.
 */
931 mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
933 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
934 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
935 struct ieee80211_sta *sta;
936 struct ieee80211_hw *hw;
937 struct mt76_wcid *wcid = status->wcid;
938 u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
941 hw = mt76_phy_hw(dev, status->ext_phy);
/* PS-poll with no wcid yet: look the sender up by transmitter address. */
942 if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
943 !(status->flag & RX_FLAG_8023)) {
944 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
946 wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
949 mt76_airtime_check(dev, skb);
951 if (!wcid || !wcid->sta)
954 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
/* signal is a (non-positive) dBm value; EWMA stores its magnitude. */
956 if (status->signal <= 0)
957 ewma_signal_add(&wcid->rssi, -status->signal);
959 wcid->inactive_count = 0;
961 if (status->flag & RX_FLAG_8023)
964 if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
967 if (ieee80211_is_pspoll(hdr->frame_control)) {
968 ieee80211_sta_pspoll(sta);
/* Only final mgmt/data frames carry a trustworthy PM bit. */
972 if (ieee80211_has_morefrags(hdr->frame_control) ||
973 !(ieee80211_is_mgmt(hdr->frame_control) ||
974 ieee80211_is_data(hdr->frame_control)))
977 ps = ieee80211_has_pm(hdr->frame_control);
979 if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
980 ieee80211_is_qos_nullfunc(hdr->frame_control)))
981 ieee80211_sta_uapsd_trigger(sta, tidno);
/* No state change: nothing further to propagate. */
983 if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
987 set_bit(MT_WCID_FLAG_PS, &wcid->flags);
989 clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
/* Inform both the driver and mac80211 of the PS transition. */
991 dev->drv->sta_ps(dev, sta, ps);
992 ieee80211_sta_ps_transition(sta, ps);
/*
 * mt76_rx_complete() - final RX delivery stage.  Under rx_lock, each frame
 * is PN-checked (the drop on failure is elided), its A-MSDU frag_list is
 * detached and each subframe converted and handed to mac80211 via
 * ieee80211_rx_list().  After unlocking, the collected list is delivered
 * either through netif_receive_skb_list (no NAPI context — guard elided)
 * or per-skb via napi_gro_receive.
 */
995 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
996 struct napi_struct *napi)
998 struct ieee80211_sta *sta;
999 struct ieee80211_hw *hw;
1000 struct sk_buff *skb, *tmp;
1003 spin_lock(&dev->rx_lock);
1004 while ((skb = __skb_dequeue(frames)) != NULL) {
1005 struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1007 if (mt76_check_ccmp_pn(skb)) {
/* Detach A-MSDU subframes so the head is delivered standalone. */
1012 skb_shinfo(skb)->frag_list = NULL;
1013 mt76_rx_convert(dev, skb, &hw, &sta);
1014 ieee80211_rx_list(hw, sta, skb, &list);
1016 /* subsequent amsdu frames */
1022 mt76_rx_convert(dev, skb, &hw, &sta);
1023 ieee80211_rx_list(hw, sta, skb, &list);
1026 spin_unlock(&dev->rx_lock);
1029 netif_receive_skb_list(&list);
1033 list_for_each_entry_safe(skb, tmp, &list, list) {
1034 skb_list_del_init(skb);
1035 napi_gro_receive(napi, skb);
/*
 * mt76_rx_poll_complete() - drain the pending-skb list of one RX queue:
 * run per-frame station bookkeeping, push each frame through the RX
 * aggregation reorder buffer, then deliver the reordered frames.
 */
1039 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1040 struct napi_struct *napi)
1042 struct sk_buff_head frames;
1043 struct sk_buff *skb;
1045 __skb_queue_head_init(&frames);
1047 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1048 mt76_check_sta(dev, skb);
1049 mt76_rx_aggr_reorder(skb, &frames);
1052 mt76_rx_complete(dev, &frames, napi);
1054 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
/*
 * mt76_sta_add() - create driver state for a new station under dev->mutex:
 * delegate to the driver's sta_add op, initialize each of the station's
 * txqs (per-txq init body elided), seed the RSSI EWMA, record which phy
 * the station belongs to, and publish the wcid for RCU lookups.  The
 * phy-mask update is visible; the wcid-mask update and error unwinding
 * are elided.
 */
1057 mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
1058 struct ieee80211_sta *sta, bool ext_phy)
1060 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1064 mutex_lock(&dev->mutex);
1066 ret = dev->drv->sta_add(dev, vif, sta);
1070 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1071 struct mt76_txq *mtxq;
1076 mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
1080 ewma_signal_init(&wcid->rssi);
1082 mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
1083 wcid->ext_phy = ext_phy;
/* Publish last, so readers never see a partially initialized wcid. */
1084 rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
1087 mutex_unlock(&dev->mutex);
/*
 * __mt76_sta_remove() - tear down a station (caller holds dev->mutex):
 * stop all RX aggregation sessions, let the driver clean up, flush pending
 * TX status for this wcid, and release the wcid index from both masks.
 */
1092 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1093 struct ieee80211_sta *sta)
1095 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1096 int i, idx = wcid->idx;
1098 for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
1099 mt76_rx_aggr_stop(dev, wcid, i);
1101 if (dev->drv->sta_remove)
1102 dev->drv->sta_remove(dev, vif, sta);
1104 mt76_tx_status_check(dev, wcid, true);
1105 mt76_wcid_mask_clear(dev->wcid_mask, idx);
1106 mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
1108 EXPORT_SYMBOL_GPL(__mt76_sta_remove);
/* Locked wrapper around __mt76_sta_remove(). */
1111 mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
1112 struct ieee80211_sta *sta)
1114 mutex_lock(&dev->mutex);
1115 __mt76_sta_remove(dev, vif, sta);
1116 mutex_unlock(&dev->mutex);
/*
 * mt76_sta_state() - mac80211 .sta_state callback: NOTEXIST->NONE adds the
 * station, AUTH->ASSOC notifies the driver's optional sta_assoc op,
 * NONE->NOTEXIST removes the station.  ext_phy distinguishes stations on
 * the secondary phy.  The final return for unhandled transitions is
 * elided.
 */
1119 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1120 struct ieee80211_sta *sta,
1121 enum ieee80211_sta_state old_state,
1122 enum ieee80211_sta_state new_state)
1124 struct mt76_phy *phy = hw->priv;
1125 struct mt76_dev *dev = phy->dev;
1126 bool ext_phy = phy != &dev->phy;
1128 if (old_state == IEEE80211_STA_NOTEXIST &&
1129 new_state == IEEE80211_STA_NONE)
1130 return mt76_sta_add(dev, vif, sta, ext_phy);
1132 if (old_state == IEEE80211_STA_AUTH &&
1133 new_state == IEEE80211_STA_ASSOC &&
1134 dev->drv->sta_assoc)
1135 dev->drv->sta_assoc(dev, vif, sta);
1137 if (old_state == IEEE80211_STA_NONE &&
1138 new_state == IEEE80211_STA_NOTEXIST)
1139 mt76_sta_remove(dev, vif, sta);
1143 EXPORT_SYMBOL_GPL(mt76_sta_state);
/*
 * mt76_sta_pre_rcu_remove() - unpublish the wcid before mac80211 frees the
 * station, so RCU readers stop finding it ahead of the actual teardown.
 */
1145 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1146 struct ieee80211_sta *sta)
1148 struct mt76_phy *phy = hw->priv;
1149 struct mt76_dev *dev = phy->dev;
1150 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1152 mutex_lock(&dev->mutex);
1153 rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1154 mutex_unlock(&dev->mutex);
1156 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
/*
 * mt76_get_txpower() - mac80211 .get_txpower: report current TX power in
 * dBm.  txpower_cur appears to be stored in 0.5 dB units (hence the
 * divide-by-2 with a per-chain delta) — TODO confirm against
 * mt76_tx_power_nss_delta().  The dbm output parameter declaration and
 * the return are elided.
 */
1158 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1161 struct mt76_phy *phy = hw->priv;
1162 int n_chains = hweight8(phy->antenna_mask);
1163 int delta = mt76_tx_power_nss_delta(n_chains);
1165 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1169 EXPORT_SYMBOL_GPL(mt76_get_txpower);
/*
 * CSA (channel switch announcement) helpers.  __mt76_csa_finish /
 * __mt76_csa_check are interface-iterator callbacks: the former completes
 * the switch on every vif whose beacon countdown has finished, the latter
 * records in dev->csa_complete whether any vif's countdown is done.
 */
1172 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1174 if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1175 ieee80211_csa_finish(vif);
1178 void mt76_csa_finish(struct mt76_dev *dev)
1180 if (!dev->csa_complete)
1183 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1184 IEEE80211_IFACE_ITER_RESUME_ALL,
1185 __mt76_csa_finish, dev);
1187 dev->csa_complete = 0;
1189 EXPORT_SYMBOL_GPL(mt76_csa_finish);
1192 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1194 struct mt76_dev *dev = priv;
1196 if (!vif->csa_active)
1199 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1202 void mt76_csa_check(struct mt76_dev *dev)
1204 ieee80211_iterate_active_interfaces_atomic(dev->hw,
1205 IEEE80211_IFACE_ITER_RESUME_ALL,
1206 __mt76_csa_check, dev);
1208 EXPORT_SYMBOL_GPL(mt76_csa_check);
/* mt76_set_tim() - stub .set_tim callback (body elided; visibly trivial). */
1211 mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
1215 EXPORT_SYMBOL_GPL(mt76_set_tim);
/*
 * mt76_insert_ccmp_hdr() - reconstruct the 8-byte CCMP header that the
 * hardware stripped: shift the 802.11 header forward over the first 8
 * bytes, then rebuild the PN/key-id bytes from status->iv (only byte 3,
 * ExtIV | key id, is visible here; the skb_push and remaining PN byte
 * stores are elided).  Clears RX_FLAG_IV_STRIPPED so mac80211 parses the
 * restored IV itself.
 */
1217 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
1219 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
1220 int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
1221 u8 *hdr, *pn = status->iv;
1224 memmove(skb->data, skb->data + 8, hdr_len);
1225 hdr = skb->data + hdr_len;
/* Byte 3 of the CCMP header: ExtIV bit (0x20) plus the key id. */
1230 hdr[3] = 0x20 | (key_id << 6);
1236 status->flag &= ~RX_FLAG_IV_STRIPPED;
1238 EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
/*
 * mt76_get_rate() - map a hardware rate index to the sband bitrate table
 * index.  For 5 GHz the CCK rates are skipped (offset setup elided); the
 * short-preamble bit (BIT(2)) is masked off before comparison.  Matches on
 * the low byte of each bitrate's hw_value; the fallback return when no
 * entry matches is elided.
 */
1240 int mt76_get_rate(struct mt76_dev *dev,
1241 struct ieee80211_supported_band *sband,
1244 int i, offset = 0, len = sband->n_bitrates;
1247 if (sband == &dev->phy.sband_5g.sband)
1250 idx &= ~BIT(2); /* short preamble */
1251 } else if (sband == &dev->phy.sband_2g.sband) {
1255 for (i = offset; i < len; i++) {
1256 if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
1262 EXPORT_SYMBOL_GPL(mt76_get_rate);
/*
 * mt76_sw_scan() / mt76_sw_scan_complete() - track software scan state in
 * phy->state so the rest of the driver can detect an in-progress scan.
 */
1264 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1267 struct mt76_phy *phy = hw->priv;
1269 set_bit(MT76_SCANNING, &phy->state);
1271 EXPORT_SYMBOL_GPL(mt76_sw_scan);
1273 void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1275 struct mt76_phy *phy = hw->priv;
1277 clear_bit(MT76_SCANNING, &phy->state);
1279 EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
/*
 * mt76_get_antenna() - report the phy's antenna mask for both TX and RX
 * under dev->mutex (the success return is elided).
 */
1281 int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1283 struct mt76_phy *phy = hw->priv;
1284 struct mt76_dev *dev = phy->dev;
1286 mutex_lock(&dev->mutex);
1287 *tx_ant = phy->antenna_mask;
1288 *rx_ant = phy->antenna_mask;
1289 mutex_unlock(&dev->mutex);
1293 EXPORT_SYMBOL_GPL(mt76_get_antenna);
/*
 * mt76_init_queue() - allocate a device-managed mt76_queue and let the
 * queue backend set up its descriptor ring.  Returns ERR_PTR on failure;
 * the success return of hwq is elided.
 */
1296 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1299 struct mt76_queue *hwq;
1302 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1304 return ERR_PTR(-ENOMEM);
1306 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1308 return ERR_PTR(err);
1312 EXPORT_SYMBOL_GPL(mt76_init_queue);