1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
/* Radiotap HE helpers: HE_BITS() builds a little-endian bitmask from an
 * IEEE80211_RADIOTAP_HE_* flag name; HE_PREP() extracts a MT_CRXV_HE_* field
 * from a 32-bit RXV word and re-encodes it into the matching radiotap field.
 */
12 #define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
13 #define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
14 IEEE80211_RADIOTAP_HE_##f)
/* Map a hardware WLAN index from an RX descriptor to its mt76_wcid entry.
 * NOTE(review): several lines (braces, early-return paths for invalid index,
 * NULL wcid, or unicast frames) are elided in this view — confirm against the
 * full source before relying on the fall-through behavior documented below.
 */
16 static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
17 u16 idx, bool unicast)
19 struct mt7921_sta *sta;
20 struct mt76_wcid *wcid;
/* reject out-of-range hardware indices (bail-out path elided here) */
22 if (idx >= ARRAY_SIZE(dev->mt76.wcid))
/* RCU-protected table lookup; presumably called under rcu_read_lock() —
 * TODO confirm with the caller (RX path). */
25 wcid = rcu_dereference(dev->mt76.wcid[idx]);
32 sta = container_of(wcid, struct mt7921_sta, wcid);
/* fall back to the owning vif's own wcid */
36 return &sta->vif->sta.wcid;
/* mac80211 station power-save state callback; body not visible in this view. */
39 void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
/* Trigger a WTBL (wireless table) update for entry @idx with command bits
 * @mask, then poll until the hardware clears the busy flag.  Returns the
 * result of the poll (timeout argument elided in this view).
 */
43 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
45 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
46 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
48 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
/* Drain dev->sta_poll_list and, for each queued station, read the per-AC
 * TX/RX airtime counters from its WTBL entry and report the deltas to
 * mac80211 via ieee80211_sta_register_airtime().
 * NOTE(review): loop structure and several statements are elided in this
 * view; the per-iteration locking pattern below is what remains visible.
 */
52 static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
/* AC -> TID used when reporting airtime (VO mapping elided in this view) */
54 static const u8 ac_to_tid[] = {
55 [IEEE80211_AC_BE] = 0,
56 [IEEE80211_AC_BK] = 1,
57 [IEEE80211_AC_VI] = 4,
60 struct ieee80211_sta *sta;
61 struct mt7921_sta *msta;
62 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
63 LIST_HEAD(sta_poll_list);
/* atomically take over the pending poll list */
66 spin_lock_bh(&dev->sta_poll_lock);
67 list_splice_init(&dev->sta_poll_list, &sta_poll_list);
68 spin_unlock_bh(&dev->sta_poll_lock);
/* pop one station at a time, dropping the lock while touching hardware */
77 spin_lock_bh(&dev->sta_poll_lock);
78 if (list_empty(&sta_poll_list)) {
79 spin_unlock_bh(&dev->sta_poll_lock);
82 msta = list_first_entry(&sta_poll_list,
83 struct mt7921_sta, poll_list);
84 list_del_init(&msta->poll_list);
85 spin_unlock_bh(&dev->sta_poll_lock);
/* WTBL airtime counters start at dword 20 of the LMAC entry */
88 addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;
90 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
91 u32 tx_last = msta->airtime_ac[i];
92 u32 rx_last = msta->airtime_ac[i + 4];
/* airtime_ac[0..3] = TX per AC, airtime_ac[4..7] = RX per AC */
94 msta->airtime_ac[i] = mt76_rr(dev, addr);
95 msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
97 tx_time[i] = msta->airtime_ac[i] - tx_last;
98 rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
/* counter approaching wrap: reset hardware and cached values */
100 if ((tx_last | rx_last) & BIT(30))
107 mt7921_mac_wtbl_update(dev, idx,
108 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
109 memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
115 sta = container_of((void *)msta, struct ieee80211_sta,
/* report deltas per AC, translated through the LMAC queue mapping */
117 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
118 u8 q = mt7921_lmac_mapping(dev, i);
119 u32 tx_cur = tx_time[q];
120 u32 rx_cur = rx_time[q];
121 u8 tid = ac_to_tid[i];
123 if (!tx_cur && !rx_cur)
126 ieee80211_sta_register_airtime(sta, tid, tx_cur,
/* Decode the HE RU allocation from the P-RXV words into @status->he_ru and
 * mark the corresponding radiotap HE known/offset fields in @he.
 * NOTE(review): the switch statement around the RU-size cases is elided in
 * this view — the ru threshold boundaries cannot be confirmed from here.
 */
135 mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
136 struct ieee80211_radiotap_he *he,
/* RU allocation index is split across two RXV words: low nibble in rxv[0],
 * high bits in rxv[1] */
142 ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
143 ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
144 ru = (u8)(ru_l | ru_h << 4);
146 status->bw = RATE_INFO_BW_HE_RU;
150 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
154 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
158 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
162 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
166 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
170 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
173 status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
/* advertise the RU allocation and its offset in the radiotap header */
177 he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
178 he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
179 le16_encode_bits(offs,
180 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
/* Prepend an ieee80211_radiotap_he header to @skb and fill it from the
 * C-RXV words, dispatching on the HE PHY sub-type in @phy.
 * NOTE(review): the switch(phy) header, break statements, and default case
 * are elided in this view.
 */
184 mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
185 struct mt76_rx_status *status,
186 __le32 *rxv, u32 phy)
188 /* TODO: struct ieee80211_radiotap_he_mu */
/* template with the "known" bits common to every HE PPDU format */
189 static const struct ieee80211_radiotap_he known = {
190 .data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
191 HE_BITS(DATA1_DATA_DCM_KNOWN) |
192 HE_BITS(DATA1_STBC_KNOWN) |
193 HE_BITS(DATA1_CODING_KNOWN) |
194 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
195 HE_BITS(DATA1_DOPPLER_KNOWN) |
196 HE_BITS(DATA1_BSS_COLOR_KNOWN),
197 .data2 = HE_BITS(DATA2_GI_KNOWN) |
198 HE_BITS(DATA2_TXBF_KNOWN) |
199 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
200 HE_BITS(DATA2_TXOP_KNOWN),
202 struct ieee80211_radiotap_he *he = NULL;
/* hardware reports LTF size minus one */
203 u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
/* reserve headroom in front of the frame and seed it with the template */
205 he = skb_push(skb, sizeof(known));
206 memcpy(he, &known, sizeof(known));
208 he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
209 HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
210 he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
211 le16_encode_bits(ltf_size,
212 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
213 he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
214 HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
/* format-specific fields below; switch header elided in this view */
217 case MT_PHY_TYPE_HE_SU:
218 he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
219 HE_BITS(DATA1_UL_DL_KNOWN) |
220 HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
221 HE_BITS(DATA1_SPTL_REUSE_KNOWN);
223 he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
224 HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
225 he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
227 case MT_PHY_TYPE_HE_EXT_SU:
228 he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
229 HE_BITS(DATA1_UL_DL_KNOWN);
231 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
233 case MT_PHY_TYPE_HE_MU:
234 he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
235 HE_BITS(DATA1_UL_DL_KNOWN) |
236 HE_BITS(DATA1_SPTL_REUSE_KNOWN);
238 he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
239 he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
/* MU and TB formats also carry an RU allocation */
241 mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
243 case MT_PHY_TYPE_HE_TB:
244 he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
245 HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
246 HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
247 HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
248 HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
250 he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
251 HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
252 HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
253 HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
255 mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
/* Fill status->freq/band: use the current channel definition during normal
 * operation, or derive them from the hardware-reported channel number
 * @chfreq while scanning or on a remain-on-channel.
 */
263 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
264 struct mt76_rx_status *status, u8 chfreq)
266 if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
267 !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
268 !test_bit(MT76_STATE_ROC, &mphy->state)) {
269 status->freq = mphy->chandef.chan->center_freq;
270 status->band = mphy->chandef.chan->band;
/* off-channel: channel numbers <= 14 are 2.4 GHz, the rest 5 GHz */
274 status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
275 status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
/* Interface iterator: feed the RX signal of @priv (an skb) into the EWMA
 * RSSI tracker of the vif the frame is addressed to.  Skips frames with a
 * non-negative (i.e. invalid) signal and frames for other interfaces.
 */
279 mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
281 struct sk_buff *skb = priv;
282 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
283 struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
284 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
286 if (status->signal > 0)
/* only account frames destined to this vif */
289 if (!ether_addr_equal(vif->addr, hdr->addr1))
/* EWMA stores the magnitude, hence the negation of the dBm value */
292 ewma_rssi_add(&mvif->rssi, -status->signal);
/* Track RSSI on association/authentication frames only, by iterating the
 * active interfaces with mt7921_mac_rssi_iter().
 */
296 mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
298 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
300 if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
301 !ieee80211_is_auth(hdr->frame_control))
304 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
305 IEEE80211_IFACE_ITER_RESUME_ALL,
306 mt7921_mac_rssi_iter, skb);
/* Parse an RX descriptor (RXD) at the head of @skb, populate the
 * mt76_rx_status in skb->cb (frequency, crypto flags, rate, signal, AMPDU
 * info, HE radiotap), and strip the descriptor so skb points at the 802.11
 * frame.  Returns 0 on success, negative error otherwise.
 * NOTE(review): many statements (error returns, rxd advances past optional
 * groups, switch/case scaffolding) are elided in this view; the optional
 * RXD groups 1/2/3/4/5 are each guarded by a bounds check on rxd vs skb->len.
 */
309 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
311 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
312 struct mt76_phy *mphy = &dev->mt76.phy;
313 struct mt7921_phy *phy = &dev->phy;
314 struct ieee80211_supported_band *sband;
315 struct ieee80211_hdr *hdr;
316 __le32 *rxd = (__le32 *)skb->data;
319 u32 rxd1 = le32_to_cpu(rxd[1]);
320 u32 rxd2 = le32_to_cpu(rxd[2]);
321 u32 rxd3 = le32_to_cpu(rxd[3]);
322 bool unicast, insert_ccmp_hdr = false;
327 memset(status, 0, sizeof(*status));
/* this chip has a single band/phy; reject frames for any other band index */
329 if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
332 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
335 chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
336 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
337 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
338 status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
/* queue the station for airtime polling (see mt7921_mac_sta_poll) */
341 struct mt7921_sta *msta;
343 msta = container_of(status->wcid, struct mt7921_sta, wcid);
344 spin_lock_bh(&dev->sta_poll_lock);
345 if (list_empty(&msta->poll_list))
346 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
347 spin_unlock_bh(&dev->sta_poll_lock);
350 mt7921_get_status_freq_info(dev, mphy, status, chfreq);
352 if (status->band == NL80211_BAND_5GHZ)
353 sband = &mphy->sband_5g.sband;
355 sband = &mphy->sband_2g.sband;
357 if (!sband->channels)
360 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
361 status->flag |= RX_FLAG_FAILED_FCS_CRC;
363 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
364 status->flag |= RX_FLAG_MMIC_ERROR;
/* hardware decrypted the frame and found no cipher/ICV mismatch */
366 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
367 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
368 status->flag |= RX_FLAG_DECRYPTED;
369 status->flag |= RX_FLAG_IV_STRIPPED;
370 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
373 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
375 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
/* optional RXD group 4 */
379 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
381 if ((u8 *)rxd - skb->data >= skb->len)
/* optional RXD group 1: stripped IV (stored byte-reversed) */
385 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
386 u8 *data = (u8 *)rxd;
388 if (status->flag & RX_FLAG_DECRYPTED) {
389 status->iv[0] = data[5];
390 status->iv[1] = data[4];
391 status->iv[2] = data[3];
392 status->iv[3] = data[2];
393 status->iv[4] = data[1];
394 status->iv[5] = data[0];
/* fragmented frames need the CCMP header re-inserted for reassembly */
396 insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
399 if ((u8 *)rxd - skb->data >= skb->len)
/* optional RXD group 2: hardware timestamp / AMPDU accounting */
403 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
404 status->timestamp = le32_to_cpu(rxd[0]);
405 status->flag |= RX_FLAG_MACTIME_START;
407 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
408 status->flag |= RX_FLAG_AMPDU_DETAILS;
410 /* all subframes of an A-MPDU have the same timestamp */
411 if (phy->rx_ampdu_ts != status->timestamp) {
/* ampdu_ref must never be 0; skip it on wrap (handling elided here) */
412 if (!++phy->ampdu_ref)
415 phy->rx_ampdu_ts = status->timestamp;
417 status->ampdu_ref = phy->ampdu_ref;
421 if ((u8 *)rxd - skb->data >= skb->len)
425 /* RXD Group 3 - P-RXV */
426 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
433 if ((u8 *)rxd - skb->data >= skb->len)
436 v0 = le32_to_cpu(rxv[0]);
437 v1 = le32_to_cpu(rxv[1]);
439 if (v0 & MT_PRXV_HT_AD_CODE)
440 status->enc_flags |= RX_ENC_FLAG_LDPC;
/* per-chain RCPI -> dBm; overall signal is the max over active chains */
442 status->chains = mphy->antenna_mask;
443 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
444 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
445 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
446 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
447 status->signal = status->chain_signal[0];
449 for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
450 if (!(status->chains & BIT(i)))
453 status->signal = max(status->signal,
454 status->chain_signal[i]);
457 stbc = FIELD_GET(MT_PRXV_STBC, v0);
458 gi = FIELD_GET(MT_PRXV_SGI, v0);
461 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
462 mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
/* translate the hardware rate per PHY mode (switch header elided) */
465 case MT_PHY_TYPE_CCK:
468 case MT_PHY_TYPE_OFDM:
469 i = mt76_get_rate(&dev->mt76, sband, i, cck);
471 case MT_PHY_TYPE_HT_GF:
473 status->encoding = RX_ENC_HT;
477 case MT_PHY_TYPE_VHT:
479 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
480 status->encoding = RX_ENC_VHT;
484 case MT_PHY_TYPE_HE_MU:
485 status->flag |= RX_FLAG_RADIOTAP_HE_MU;
487 case MT_PHY_TYPE_HE_SU:
488 case MT_PHY_TYPE_HE_EXT_SU:
489 case MT_PHY_TYPE_HE_TB:
491 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
492 status->encoding = RX_ENC_HE;
493 status->flag |= RX_FLAG_RADIOTAP_HE;
496 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
499 status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
505 status->rate_idx = i;
507 switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
508 case IEEE80211_STA_RX_BW_20:
510 case IEEE80211_STA_RX_BW_40:
/* HE ER SU with 106-tone RU is reported as 40 MHz by hardware */
511 if (mode & MT_PHY_TYPE_HE_EXT_SU &&
512 (idx & MT_PRXV_TX_ER_SU_106T)) {
513 status->bw = RATE_INFO_BW_HE_RU;
515 NL80211_RATE_INFO_HE_RU_ALLOC_106;
517 status->bw = RATE_INFO_BW_40;
520 case IEEE80211_STA_RX_BW_80:
521 status->bw = RATE_INFO_BW_80;
523 case IEEE80211_STA_RX_BW_160:
524 status->bw = RATE_INFO_BW_160;
530 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
/* short GI applies only to pre-HE modes */
531 if (mode < MT_PHY_TYPE_HE_SU && gi)
532 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
/* optional RXD group 5 */
534 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
536 if ((u8 *)rxd - skb->data >= skb->len)
/* strip the descriptor and the hardware header padding */
541 skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
543 if (insert_ccmp_hdr) {
544 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
546 mt76_insert_ccmp_hdr(skb, key_id);
549 mt7921_mac_assoc_rssi(dev, skb);
551 if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
552 mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
554 hdr = mt76_skb_get_hdr(skb);
555 if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
558 status->aggr = unicast &&
559 !ieee80211_is_qos_nullfunc(hdr->frame_control);
560 status->qos_ctl = *ieee80211_get_qos_ctl(hdr);
561 status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
/* Fill the 802.3 (hardware 802.11 encapsulation offload) specific fields of
 * a TXWI: header format, TID, ethertype flag, and the synthesized data
 * frame type/subtype in TXD2 and TXD7.
 * NOTE(review): the derivation of 'wmm' from sta is elided in this view.
 */
567 mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
568 struct sk_buff *skb, struct mt76_wcid *wcid)
570 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
571 u8 fc_type, fc_stype;
576 struct ieee80211_sta *sta;
578 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
582 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
583 FIELD_PREP(MT_TXD1_TID, tid);
/* ethertype >= 0x600 means a real Ethernet II frame, not 802.3 length */
585 if (be16_to_cpu(skb->protocol) >= ETH_P_802_3_MIN)
586 val |= MT_TXD1_ETH_802_3;
588 txwi[1] |= cpu_to_le32(val);
/* the on-air frame will be data, QoS-data when the peer is WMM capable */
590 fc_type = IEEE80211_FTYPE_DATA >> 2;
591 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
593 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
594 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
596 txwi[2] |= cpu_to_le32(val);
598 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
599 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
600 txwi[7] |= cpu_to_le32(val);
/* Fill the native-802.11 specific fields of a TXWI from the frame header:
 * TID (including ADDBA/BAR special cases), header format/length, frame
 * type/subtype, BIP handling, beacon flags, and injected-frame sequence
 * number override.
 */
604 mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
605 struct sk_buff *skb, struct ieee80211_key_conf *key)
607 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
608 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
609 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
610 bool multicast = is_multicast_ether_addr(hdr->addr1);
611 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
612 __le16 fc = hdr->frame_control;
613 u8 fc_type, fc_stype;
/* ADDBA request: tell the hardware and use the TID being negotiated */
616 if (ieee80211_is_action(fc) &&
617 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
618 mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
619 u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
621 txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
622 tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
623 } else if (ieee80211_is_back_req(hdr->frame_control)) {
624 struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
625 u16 control = le16_to_cpu(bar->control);
627 tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
/* MT_TXD1_HDR_INFO is the MAC header length in 16-bit words */
630 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
631 FIELD_PREP(MT_TXD1_HDR_INFO,
632 ieee80211_get_hdrlen_from_skb(skb) / 2) |
633 FIELD_PREP(MT_TXD1_TID, tid);
634 txwi[1] |= cpu_to_le32(val);
636 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
637 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
639 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
640 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
641 FIELD_PREP(MT_TXD2_MULTICAST, multicast);
/* BIP-protected multicast mgmt frames must not be hardware-encrypted */
643 if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
644 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
646 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
649 if (!ieee80211_is_data(fc) || multicast)
650 val |= MT_TXD2_FIX_RATE;
652 txwi[2] |= cpu_to_le32(val);
654 if (ieee80211_is_beacon(fc)) {
655 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
656 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
/* injected frames carry their own sequence number; pass it through */
659 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
660 u16 seqno = le16_to_cpu(hdr->seq_ctrl);
662 if (ieee80211_is_back_req(hdr->frame_control)) {
663 struct ieee80211_bar *bar;
665 bar = (struct ieee80211_bar *)skb->data;
666 seqno = le16_to_cpu(bar->start_seq_num);
669 val = MT_TXD3_SN_VALID |
670 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
671 txwi[3] |= cpu_to_le32(val);
674 val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
675 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
676 txwi[7] |= cpu_to_le32(val);
/* Build the common TXWI (TX descriptor) for @skb: queue selection, packet
 * format, length, wcid/omac, protection and ack flags, then delegate the
 * 802.3 vs 802.11 specific fields.  Fixed-rate frames additionally get a
 * default CCK/OFDM rate and BA disabled.
 * NOTE(review): several lines (beacon branch header, tx_count setup,
 * txwi[2..6] zeroing) are elided in this view.
 */
679 void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
680 struct sk_buff *skb, struct mt76_wcid *wcid,
681 struct ieee80211_key_conf *key, bool beacon)
683 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
684 struct ieee80211_vif *vif = info->control.vif;
685 struct mt76_phy *mphy = &dev->mphy;
686 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
687 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
692 struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
694 omac_idx = mvif->omac_idx;
695 wmm_idx = mvif->wmm_idx;
/* queue selection: beacon / PSD(ALTX) / normal per-AC LMAC queue */
699 p_fmt = MT_TX_TYPE_FW;
700 q_idx = MT_LMAC_BCN0;
701 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
702 p_fmt = MT_TX_TYPE_CT;
703 q_idx = MT_LMAC_ALTX0;
705 p_fmt = MT_TX_TYPE_CT;
706 q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
707 mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
/* TXD0: total length includes the descriptor itself */
710 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
711 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
712 FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
713 txwi[0] = cpu_to_le32(val);
715 val = MT_TXD1_LONG_FORMAT |
716 FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
717 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
719 txwi[1] = cpu_to_le32(val);
722 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
724 val |= MT_TXD3_PROTECT_FRAME;
725 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
726 val |= MT_TXD3_NO_ACK;
728 txwi[3] = cpu_to_le32(val);
732 txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
/* frame-format specific TXWI fields */
735 mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
737 mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
739 if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
742 /* hardware won't add HTC for mgmt/ctrl frame */
743 txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
745 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
746 rate = MT7921_5G_RATE_DEFAULT;
748 rate = MT7921_2G_RATE_DEFAULT;
750 val = MT_TXD6_FIXED_BW |
751 FIELD_PREP(MT_TXD6_TX_RATE, rate);
752 txwi[6] |= cpu_to_le32(val);
753 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
/* Fill the hardware TX pointer (TXP) structure: record the msdu token @id
 * and pack the remaining DMA buffers (tx_info->buf[1..]) two per
 * mt7921_txp_ptr entry, marking the final fragment with MT_TXD_LEN_LAST.
 * NOTE(review): the even/odd branch condition and the ptr++ advance are
 * elided in this view.
 */
758 mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
759 void *txp_ptr, u32 id)
761 struct mt7921_hw_txp *txp = txp_ptr;
762 struct mt7921_txp_ptr *ptr = &txp->ptr[0];
/* buf[0] covers the TXWI + TXP header; the payload starts at buf[1] */
763 int i, nbuf = tx_info->nbuf - 1;
765 tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
768 txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
770 for (i = 0; i < nbuf; i++) {
771 u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
772 u32 addr = tx_info->buf[i + 1].addr;
775 len |= MT_TXD_LEN_LAST;
/* odd fragments land in buf1/len1, even ones in buf0/len0 */
778 ptr->buf1 = cpu_to_le32(addr);
779 ptr->len1 = cpu_to_le16(len);
782 ptr->buf0 = cpu_to_le32(addr);
783 ptr->len0 = cpu_to_le16(len);
/* mt76 tx-prepare hook: allocate a token for the frame, build the TXWI and
 * hardware TXP, and hand ownership of the skb to the DMA layer (tx_info->skb
 * is replaced by DMA_DUMMY_DATA; completion resolves it via the token).
 * NOTE(review): the token-allocation error path is elided in this view.
 */
788 int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
789 enum mt76_txq_id qid, struct mt76_wcid *wcid,
790 struct ieee80211_sta *sta,
791 struct mt76_tx_info *tx_info)
793 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
794 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
795 struct ieee80211_key_conf *key = info->control.hw_key;
796 struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
797 struct mt76_txwi_cache *t;
798 struct mt7921_txp_common *txp;
800 u8 *txwi = (u8 *)txwi_ptr;
/* too short to be a valid frame */
802 if (unlikely(tx_info->skb->len <= ETH_HLEN))
806 wcid = &dev->mt76.global_wcid;
808 cb->wcid = wcid->idx;
/* txwi cache lives right after the descriptor in the same allocation */
810 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
811 t->skb = tx_info->skb;
813 id = mt76_token_consume(mdev, &t);
817 mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
820 txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
821 memset(txp, 0, sizeof(struct mt7921_txp_common));
822 mt7921_write_hw_txp(dev, tx_info, txp, id);
/* the real skb is now tracked by the token, not by the queue entry */
824 tx_info->skb = DMA_DUMMY_DATA;
/* On TX completion of a QoS-data frame, start a BA (aggregation) session
 * for its TID if the peer supports HT and one is not already in flight.
 */
830 mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
832 struct mt7921_sta *msta;
836 if (!sta || !sta->ht_cap.ht_supported)
839 tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
840 if (tid >= 6) /* skip VO queue */
/* reconstruct the frame-control type/subtype from TXD2 */
843 val = le32_to_cpu(txwi[2]);
844 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
845 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
846 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
849 msta = (struct mt7921_sta *)sta->drv_priv;
/* ampdu_state bit doubles as a "session requested" latch per TID */
850 if (!test_and_set_bit(tid, &msta->ampdu_state))
851 ieee80211_start_tx_ba_session(sta, tid, 0);
/* Report TX completion of @skb to mac80211 via ieee80211_tx_status_ext(),
 * attaching the station's cached tx rate and translating @stat into the
 * ACK flag.  @free_list batches skb frees for the caller.
 * NOTE(review): the status struct initializers and the stat test guarding
 * the ACK flag are partially elided in this view.
 */
855 mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
856 struct ieee80211_sta *sta, u8 stat,
857 struct list_head *free_list)
859 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
860 struct ieee80211_tx_status status = {
864 .free_list = free_list,
866 struct ieee80211_hw *hw;
869 struct mt7921_sta *msta;
871 msta = (struct mt7921_sta *)sta->drv_priv;
872 status.rate = &msta->stats.tx_rate;
875 hw = mt76_tx_status_get_hw(mdev, skb);
877 if (info->flags & IEEE80211_TX_CTL_AMPDU)
878 info->flags |= IEEE80211_TX_STAT_AMPDU;
881 ieee80211_tx_info_clear_status(info);
883 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
884 info->flags |= IEEE80211_TX_STAT_ACK;
/* no airtime estimate available from this completion path */
886 info->status.tx_time = 0;
887 ieee80211_tx_status_ext(hw, &status);
/* DMA-unmap every buffer referenced by the hardware TXP attached to txwi
 * cache entry @t, honoring the MT_TXD_LEN_LAST terminator.
 * NOTE(review): the 'last' loop-break checks and the DMA direction argument
 * are elided in this view.
 */
890 void mt7921_txp_skb_unmap(struct mt76_dev *dev,
891 struct mt76_txwi_cache *t)
893 struct mt7921_txp_common *txp;
896 txp = mt7921_txwi_to_txp(dev, t);
898 for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
899 struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
/* each ptr entry holds up to two buffers (buf0/len0 and buf1/len1) */
903 len = le16_to_cpu(ptr->len0);
904 last = len & MT_TXD_LEN_LAST;
905 len &= MT_TXD_LEN_MASK;
906 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
911 len = le16_to_cpu(ptr->len1);
912 last = len & MT_TXD_LEN_LAST;
913 len &= MT_TXD_LEN_MASK;
914 dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
/* Process a hardware TX-free event: for each released msdu token, unmap its
 * buffers, run aggregation/status bookkeeping, and complete or recycle the
 * skb.  Finally unblocks TX if token release woke the queue and reschedules
 * the tx worker.
 * NOTE(review): loop continuations, rcu_read_lock pairing and several
 * guard statements are elided in this view.
 */
921 void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
923 struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
924 struct mt76_dev *mdev = &dev->mt76;
925 struct mt76_txwi_cache *txwi;
926 struct ieee80211_sta *sta = NULL;
927 LIST_HEAD(free_list);
932 /* clean DMA queues and unmap buffers first */
933 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
934 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
936 /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
937 * to the time ack is received or dropped by hw (air + hw queue time).
938 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
939 */
940 count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
941 for (i = 0; i < count; i++) {
942 u32 msdu, info = le32_to_cpu(free->info[i]);
945 /* 1'b1: new wcid pair.
946 * 1'b0: msdu_id with the same 'wcid pair' as above.
947 */
948 if (info & MT_TX_FREE_PAIR) {
949 struct mt7921_sta *msta;
950 struct mt7921_phy *phy;
951 struct mt76_wcid *wcid;
955 idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
956 wcid = rcu_dereference(dev->mt76.wcid[idx]);
957 sta = wcid_to_sta(wcid);
/* schedule the station for stats and airtime polling */
961 msta = container_of(wcid, struct mt7921_sta, wcid);
962 phy = msta->vif->phy;
963 spin_lock_bh(&dev->sta_poll_lock);
964 if (list_empty(&msta->stats_list))
965 list_add_tail(&msta->stats_list, &phy->stats_list);
966 if (list_empty(&msta->poll_list))
967 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
968 spin_unlock_bh(&dev->sta_poll_lock);
972 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
973 stat = FIELD_GET(MT_TX_FREE_STATUS, info);
975 txwi = mt76_token_release(mdev, msdu, &wake);
979 mt7921_txp_skb_unmap(mdev, txwi);
981 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
982 void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
/* EAPOL frames must not trigger BA session setup */
984 if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
985 mt7921_tx_check_aggr(sta, txwi_ptr);
987 if (sta && !info->tx_time_est) {
988 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
/* clamp the non-AQL packet counter at zero on underflow */
991 pending = atomic_dec_return(&wcid->non_aql_packets);
993 atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
996 mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
1000 mt76_put_txwi(mdev, txwi);
/* token release freed up space: unblock the TX path */
1004 mt76_set_tx_blocked(&dev->mt76, false);
1006 napi_consume_skb(skb, 1);
1008 list_for_each_entry_safe(skb, tmp, &free_list, list) {
1009 skb_list_del_init(skb);
1010 napi_consume_skb(skb, 1);
1013 mt7921_mac_sta_poll(dev);
1014 mt76_worker_schedule(&dev->mt76.tx_worker);
/* Per-queue-entry TX completion hook: resolve DMA_DUMMY_DATA entries back
 * to the real skb via the msdu token, then report status for the frame.
 * NOTE(review): the early-exit guarded by dev_kfree_skb_any and the rcu
 * locking around the wcid lookup are partially elided in this view.
 */
1017 void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
1019 struct mt7921_dev *dev;
1022 dev_kfree_skb_any(e->skb);
1026 dev = container_of(mdev, struct mt7921_dev, mt76);
/* placeholder entry: fetch the real skb from the token table */
1029 if (e->skb == DMA_DUMMY_DATA) {
1030 struct mt76_txwi_cache *t;
1031 struct mt7921_txp_common *txp;
1034 txp = mt7921_txwi_to_txp(mdev, e->txwi);
1035 token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
1036 t = mt76_token_put(mdev, token);
1037 e->skb = t ? t->skb : NULL;
1041 struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
1042 struct mt76_wcid *wcid;
1044 wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
1046 mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
/* Clear hardware statistics: read the clear-on-read aggregation and airtime
 * MIB counters to discard stale values, reset the survey timestamp and the
 * software aggregation stats, and arm the RX-time clear bits.
 */
1051 void mt7921_mac_reset_counters(struct mt7921_phy *phy)
1053 struct mt7921_dev *dev = phy->dev;
1056 for (i = 0; i < 4; i++) {
/* reads discard the counters (clear-on-read registers) */
1057 mt76_rr(dev, MT_TX_AGG_CNT(0, i));
1058 mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
1061 dev->mt76.phy.survey_time = ktime_get_boottime();
/* only the first half belongs to this (single) phy */
1062 memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);
1064 /* reset airtime counters */
1065 mt76_rr(dev, MT_MIB_SDR9(0));
1066 mt76_rr(dev, MT_MIB_SDR36(0));
1067 mt76_rr(dev, MT_MIB_SDR37(0));
1069 mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1070 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
/* Program MAC timing (CCA/PLCP timeouts, IFS, slot time) adjusted for the
 * configured coverage class, and select the CF-End rate.  TX/RX are held
 * disabled via ARB_SCR while the registers are rewritten.
 * NOTE(review): the sifs/slottime derivation is elided in this view.
 */
1073 void mt7921_mac_set_timing(struct mt7921_phy *phy)
1075 s16 coverage_class = phy->coverage_class;
1076 struct mt7921_dev *dev = phy->dev;
1077 u32 val, reg_offset;
1078 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
1079 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
1080 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
1081 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
1083 bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
1085 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
/* quiesce the MAC while rewriting timing registers */
1093 mt76_set(dev, MT_ARB_SCR(0),
1094 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
/* coverage class adds 3 us of propagation allowance per step */
1097 offset = 3 * coverage_class;
1098 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
1099 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
1101 mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
1102 mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
1103 mt76_wr(dev, MT_TMAC_ICR0(0),
1104 FIELD_PREP(MT_IFS_EIFS, 360) |
1105 FIELD_PREP(MT_IFS_RIFS, 2) |
1106 FIELD_PREP(MT_IFS_SIFS, sifs) |
1107 FIELD_PREP(MT_IFS_SLOT, phy->slottime));
/* short slot or 5 GHz: use OFDM CF-End, otherwise 11b */
1109 if (phy->slottime < 20 || is_5ghz)
1110 val = MT7921_CFEND_RATE_DEFAULT;
1112 val = MT7921_CFEND_RATE_11B;
1114 mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
1115 mt76_clear(dev, MT_ARB_SCR(0),
1116 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
/* Read the noise floor for @idx; body not visible in this view. */
1120 mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
/* Accumulate channel survey statistics (busy/tx/rx/obss time) from the MIB
 * counters into the current mt76_channel_state, and keep an exponentially
 * smoothed noise estimate in phy->noise (stored <<4 fixed point).
 */
1126 mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
1128 struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
1129 struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
1130 struct mt76_channel_state *state;
1131 u64 busy_time, tx_time, rx_time, obss_time;
1134 busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
1135 MT_MIB_SDR9_BUSY_MASK);
1136 tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
1137 MT_MIB_SDR36_TXTIME_MASK);
1138 rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
1139 MT_MIB_SDR37_RXTIME_MASK);
1140 obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
1141 MT_MIB_OBSSTIME_MASK);
1143 nf = mt7921_phy_get_nf(phy, idx);
/* first sample seeds the filter; afterwards EWMA with 1/16 weight */
1145 phy->noise = nf << 4;
1147 phy->noise += nf - (phy->noise >> 4);
1149 state = mphy->chan_state;
1150 state->cc_busy += busy_time;
1151 state->cc_tx += tx_time;
/* cc_rx counts our RX plus OBSS airtime; cc_bss_rx only our own */
1152 state->cc_rx += rx_time + obss_time;
1153 state->cc_bss_rx += rx_time;
1154 state->noise = -(phy->noise >> 4);
/* mt76 survey hook: wake the chip from runtime PM, update channel stats,
 * clear the OBSS airtime counter, and reschedule power save.
 */
1157 void mt7921_update_channel(struct mt76_dev *mdev)
1159 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
/* skip the update if the chip cannot be woken */
1161 if (mt76_connac_pm_wake(&dev->mphy, &dev->pm))
1164 mt7921_phy_update_channel(&mdev->phy, 0);
1165 /* reset obss airtime */
1166 mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1168 mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
/* Release every outstanding msdu token: unmap DMA buffers, free the pending
 * skbs back to mac80211, return the txwi caches, and destroy the IDR.
 * Used on reset when in-flight frames will never complete.
 */
1171 void mt7921_tx_token_put(struct mt7921_dev *dev)
1173 struct mt76_txwi_cache *txwi;
1176 spin_lock_bh(&dev->mt76.token_lock);
1177 idr_for_each_entry(&dev->mt76.token, txwi, id) {
1178 mt7921_txp_skb_unmap(&dev->mt76, txwi);
1180 struct ieee80211_hw *hw;
1182 hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
1183 ieee80211_free_txskb(hw, txwi->skb);
1185 mt76_put_txwi(&dev->mt76, txwi);
1186 dev->mt76.token_count--;
1188 spin_unlock_bh(&dev->mt76.token_lock);
1189 idr_destroy(&dev->mt76.token);
/* Post-reset interface iterator: force mac80211 to reconnect the vif and
 * re-create its firmware device context and TX settings.
 */
1193 mt7921_vif_connect_iter(void *priv, u8 *mac,
1194 struct ieee80211_vif *vif)
1196 struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
1197 struct mt7921_dev *dev = mvif->phy->dev;
/* reconnect=true makes mac80211 emit a connection-loss event */
1199 ieee80211_disconnect(vif, true);
1201 mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
1202 mt7921_mcu_set_tx(dev, vif);
/* Perform one full chip recovery attempt: quiesce interrupts/NAPI/worker,
 * drop all pending TX tokens, reset WPDMA, restart NAPI, reload firmware
 * and EEPROM, then re-run MAC init and restart the phy.
 * NOTE(review): error-path gotos after each step are elided in this view.
 */
1206 mt7921_mac_reset(struct mt7921_dev *dev)
1210 mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
/* mask all interrupts during recovery */
1212 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
1213 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
1215 set_bit(MT76_MCU_RESET, &dev->mphy.state);
/* release any waiter blocked on an MCU response that will never come */
1216 wake_up(&dev->mt76.mcu.wait);
1217 skb_queue_purge(&dev->mt76.mcu.res_q);
1219 mt76_txq_schedule_all(&dev->mphy);
1221 mt76_worker_disable(&dev->mt76.tx_worker);
1222 napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
1223 napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
1224 napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
1225 napi_disable(&dev->mt76.tx_napi);
1227 mt7921_tx_token_put(dev);
1228 idr_init(&dev->mt76.token);
1230 err = mt7921_wpdma_reset(dev, true);
1234 mt76_for_each_q_rx(&dev->mt76, i) {
1235 napi_enable(&dev->mt76.napi[i]);
1236 napi_schedule(&dev->mt76.napi[i]);
1239 napi_enable(&dev->mt76.tx_napi);
1240 napi_schedule(&dev->mt76.tx_napi);
1241 mt76_worker_enable(&dev->mt76.tx_worker);
1243 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1244 clear_bit(MT76_STATE_PM, &dev->mphy.state);
/* NOTE(review): HOST_INT_ENA is written 0 again here while the PCIe MAC
 * interrupt is unmasked (0xff) — confirm against the full source whether
 * an irq-enable write is elided from this view. */
1246 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
1247 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
1249 err = mt7921_run_firmware(dev);
1253 err = mt7921_mcu_set_eeprom(dev);
1257 mt7921_mac_init(dev);
1258 return __mt7921_start(&dev->phy);
1261 /* system error recovery */
/* Workqueue handler for full chip recovery: stop queues and periodic work,
 * retry mt7921_mac_reset() up to 10 times, then abort any in-flight scan,
 * wake the queues and force all interfaces to reconnect.
 */
1262 void mt7921_mac_reset_work(struct work_struct *work)
1264 struct ieee80211_hw *hw;
1265 struct mt7921_dev *dev;
1268 dev = container_of(work, struct mt7921_dev, reset_work);
1271 dev_err(dev->mt76.dev, "chip reset\n");
1272 ieee80211_stop_queues(hw);
1274 cancel_delayed_work_sync(&dev->mphy.mac_work);
1275 cancel_delayed_work_sync(&dev->pm.ps_work);
1276 cancel_work_sync(&dev->pm.wake_work);
1278 mutex_lock(&dev->mt76.mutex);
/* retry the reset a bounded number of times */
1279 for (i = 0; i < 10; i++) {
1280 if (!mt7921_mac_reset(dev))
1283 mutex_unlock(&dev->mt76.mutex);
1286 dev_err(dev->mt76.dev, "chip reset failed\n");
/* a scan interrupted by the reset must be reported as aborted */
1288 if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
1289 struct cfg80211_scan_info info = {
1293 ieee80211_scan_completed(dev->mphy.hw, &info);
1296 ieee80211_wake_queues(hw);
1297 ieee80211_iterate_active_interfaces(hw,
1298 IEEE80211_IFACE_ITER_RESUME_ALL,
1299 mt7921_vif_connect_iter, NULL);
/* Schedule asynchronous chip recovery (see mt7921_mac_reset_work). */
1302 void mt7921_reset(struct mt76_dev *mdev)
1304 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1306 queue_work(dev->mt76.wq, &dev->reset_work);
/* Accumulate MIB hardware counters (FCS errors, ack/BA failures, RTS) into
 * phy->mib and fold the per-size TX aggregation counters into
 * dev->mt76.aggr_stats (two 16-bit counters per 32-bit register).
 */
1310 mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
1312 struct mt7921_dev *dev = phy->dev;
1313 struct mib_stats *mib = &phy->mib;
1314 int i, aggr0 = 0, aggr1;
1316 mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
1317 MT_MIB_SDR3_FCS_ERR_MASK);
1318 mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
1319 MT_MIB_ACK_FAIL_COUNT_MASK);
1320 mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
1321 MT_MIB_BA_FAIL_COUNT_MASK);
1322 mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
1323 MT_MIB_RTS_COUNT_MASK);
1324 mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
1325 MT_MIB_RTS_FAIL_COUNT_MASK);
/* AGG_CNT and AGG_CNT2 interleave into the low/high halves of the array */
1327 for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
1330 val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
1331 val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
1333 dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
1334 dev->mt76.aggr_stats[aggr0++] += val >> 16;
1335 dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
1336 dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
/*
 * mt7921_mac_sta_stats_work() - refresh per-station TX rate info.
 *
 * Splices the phy's pending stats_list onto a local list under
 * sta_poll_lock, then drains it one station at a time. The lock is
 * dropped around mt7921_get_wtbl_info() because the WTBL query can
 * sleep/communicate with firmware, and re-taken to pop the next entry.
 */
1341 mt7921_mac_sta_stats_work(struct mt7921_phy *phy)
1343 struct mt7921_dev *dev = phy->dev;
1344 struct mt7921_sta *msta;
/* Take the whole pending list in one shot to minimize lock hold time. */
1347 spin_lock_bh(&dev->sta_poll_lock);
1348 list_splice_init(&phy->stats_list, &list);
1350 while (!list_empty(&list)) {
1351 msta = list_first_entry(&list, struct mt7921_sta, stats_list);
1352 list_del_init(&msta->stats_list);
/* Drop the lock: the WTBL query below must not run under a spinlock. */
1353 spin_unlock_bh(&dev->sta_poll_lock);
1355 /* query wtbl info to report tx rate for further devices */
1356 mt7921_get_wtbl_info(dev, msta->wcid.idx);
1358 spin_lock_bh(&dev->sta_poll_lock);
1361 spin_unlock_bh(&dev->sta_poll_lock);
/*
 * mt7921_mac_work() - periodic MAC housekeeping, re-armed every
 * MT7921_WATCHDOG_TIME.
 *
 * Under the device mutex: updates channel survey data every run, MIB
 * statistics every 2nd run, and per-station stats every 4th run of the
 * MIB path (counters wrap via the == N check then reset to 0).
 */
1364 void mt7921_mac_work(struct work_struct *work)
1366 struct mt7921_phy *phy;
1367 struct mt76_phy *mphy;
/* Recover the mt76_phy embedding this delayed work item. */
1369 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
/* Serialize against other driver operations (also handles PM wake). */
1373 mt7921_mutex_acquire(phy->dev);
1375 mt76_update_survey(mphy->dev);
/* MIB stats every 2 ticks... */
1376 if (++mphy->mac_work_count == 2) {
1377 mphy->mac_work_count = 0;
1379 mt7921_mac_update_mib_stats(phy);
/* ...and station stats every 4 MIB updates (every 8 ticks overall). */
1381 if (++phy->sta_work_count == 4) {
1382 phy->sta_work_count = 0;
1383 mt7921_mac_sta_stats_work(phy);
1386 mt7921_mutex_release(phy->dev);
/* Re-arm the watchdog. */
1387 ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
1388 MT7921_WATCHDOG_TIME);
/*
 * mt7921_pm_wake_work() - bring the device out of firmware power save.
 *
 * On a successful driver-own handshake (mt7921_mcu_drv_pmctrl() == 0):
 * kicks all RX NAPI contexts, flushes TX skbs queued while asleep,
 * cleans up the TX ring, and re-arms the periodic MAC work if the phy
 * is still running. Finally wakes mac80211 queues and anyone blocked
 * on pm.wait.
 */
1391 void mt7921_pm_wake_work(struct work_struct *work)
1393 struct mt7921_dev *dev;
1394 struct mt76_phy *mphy;
1396 dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1398 mphy = dev->phy.mt76;
/* 0 return means the firmware handed ownership back to the driver. */
1400 if (!mt7921_mcu_drv_pmctrl(dev)) {
/* Restart RX processing on every queue. */
1403 mt76_for_each_q_rx(&dev->mt76, i)
1404 napi_schedule(&dev->mt76.napi[i]);
/* Flush frames that were queued while the chip was asleep. */
1405 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
1406 mt7921_tx_cleanup(dev);
1407 if (test_bit(MT76_STATE_RUNNING, &mphy->state))
1408 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
1409 MT7921_WATCHDOG_TIME);
/* Unblock mac80211 TX and any waiters regardless of the outcome above. */
1412 ieee80211_wake_queues(mphy->hw);
1413 wake_up(&dev->pm.wait);
/*
 * mt7921_pm_power_save_work() - enter firmware power save after an
 * idle period.
 *
 * Skips (and effectively postpones) power save while a HW scan or
 * scheduled scan is active. If recent activity falls inside the idle
 * window, the work is re-queued for the remaining time; otherwise the
 * firmware-own handshake is attempted, and on failure the work is
 * retried after 'delta' jiffies.
 */
1416 void mt7921_pm_power_save_work(struct work_struct *work)
1418 struct mt7921_dev *dev;
1419 unsigned long delta;
1421 dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1424 delta = dev->pm.idle_timeout;
/* Never power down mid-scan. */
1425 if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
1426 test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
/* Still within the idle window: reschedule for the time left. */
1429 if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
1430 delta = dev->pm.last_activity + delta - jiffies;
/* 0 return means the firmware accepted ownership (elided return path
 * presumably exits here — TODO confirm against the full source). */
1434 if (!mt7921_mcu_fw_pmctrl(dev))
1437 queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
/*
 * mt7921_mac_set_beacon_filter() - toggle firmware beacon filtering for
 * a vif.
 *
 * Only effective when runtime PM is enabled. Programs the firmware BSS
 * PM state via mt7921_mcu_set_bss_pm(), then mirrors the result into
 * mac80211 (IEEE80211_VIF_BEACON_FILTER) and the RX filter register
 * (drop beacons from other BSSes while filtering is on).
 *
 * Return: presumably 0 on success or a negative errno from the MCU call;
 * the error-propagation lines are elided in this extract — TODO confirm.
 */
1440 int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
1441 struct ieee80211_vif *vif,
1444 struct mt7921_dev *dev = phy->dev;
/* mt7921 is single-phy; ext_phy selects the register bank for band 1. */
1445 bool ext_phy = phy != &dev->phy;
/* Beacon filtering only makes sense with runtime PM active. */
1448 if (!dev->pm.enable)
1451 err = mt7921_mcu_set_bss_pm(dev, vif, enable);
/* Enable path: tell mac80211 the HW filters beacons, drop foreign ones. */
1456 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
1457 mt76_set(dev, MT_WF_RFCR(ext_phy),
1458 MT_WF_RFCR_DROP_OTHER_BEACON);
/* Disable path: revert both the mac80211 flag and the RX filter bit. */
1460 vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
1461 mt76_clear(dev, MT_WF_RFCR(ext_phy),
1462 MT_WF_RFCR_DROP_OTHER_BEACON);
/*
 * mt7921_coredump_work() - assemble a firmware coredump and hand it to
 * the devcoredump framework.
 *
 * If coredump fragments are still arriving (last activity within
 * 4 * MT76_CONNAC_COREDUMP_TIMEOUT), re-queues itself and returns.
 * Otherwise allocates a MT76_CONNAC_COREDUMP_SZ buffer, drains the
 * queued MCU messages into it (stripping the mt7921_mcu_rxd header and
 * bounds-checking against the buffer size), submits the dump via
 * dev_coredumpv() — which takes ownership of the vzalloc'd buffer —
 * and finally triggers a chip reset.
 */
1468 void mt7921_coredump_work(struct work_struct *work)
1470 struct mt7921_dev *dev;
1473 dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
1474 coredump.work.work);
/* Fragments still flowing in: wait another timeout period. */
1476 if (time_is_after_jiffies(dev->coredump.last_activity +
1477 4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
1478 queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
1479 MT76_CONNAC_COREDUMP_TIMEOUT);
1483 dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
1487 struct sk_buff *skb;
/* msg_list is shared with the RX path; dequeue under the device lock. */
1489 spin_lock_bh(&dev->mt76.lock);
1490 skb = __skb_dequeue(&dev->coredump.msg_list);
1491 spin_unlock_bh(&dev->mt76.lock);
/* Skip the MCU RX descriptor; only the payload goes into the dump. */
1496 skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
/* Never write past the end of the dump buffer. */
1497 if (data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
1502 memcpy(data, skb->data, skb->len);
/* dev_coredumpv() takes ownership of 'dump' and frees it itself. */
1507 dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
/* The firmware is dead at this point: schedule a full chip reset. */
1509 mt7921_reset(&dev->mt76);