// SPDX-License-Identifier: ISC

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"
#include "../trace.h"

#define MT_PSE_PAGE_SIZE	128

static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));

	return ret;
}

static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

void mt7603_mac_reset_counters(struct mt7603_dev *dev)
{
	int i;

	/* reading the aggregation counters also clears them */
	for (i = 0; i < 2; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(i));

	memset(dev->mt76.aggr_stats, 0, sizeof(dev->mt76.aggr_stats));
}

void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	/* hold off the arbiter while the timing registers are updated */
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

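/*
 * The WTBL (wireless station table) is split into four segments. WTBL1
 * has its own register window; WTBL2/3/4 sit back to back behind the PCIe
 * remap window, so each segment's base is derived from the end of the
 * previous one.
 */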
static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}

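/*
 * Initialize the WTBL1 entry of a station: program its MAC address and
 * MUAR index, then zero the matching WTBL2/3/4 segments and clear the
 * admission counters.
 */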
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

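/*
 * Flush pending tx for a station: mark its WTBL entry as skip-tx, then
 * have the DMA scheduler drain all four AC queues of that WCID, either
 * dropping the frames (abort) or redirecting them towards the MCU queue.
 */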
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
			FIELD_PREP(MT_TX_ABORT_WCID, idx));

	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}

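/*
 * Reset a WTBL entry to its default state: restore the frame/entry
 * mapping of the WTBL2/3/4 segments (derived from the PSE page layout),
 * clear the BA state and reset the rx/tx/admission counters.
 */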
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}

void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}

	/* pick the largest aggregation size limit not exceeding ba_size */
	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

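/*
 * Poll per-AC tx airtime out of WTBL4 for every station on the poll
 * list, report it to mac80211 for airtime fairness accounting and add
 * the total to the current channel state.
 */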
void mt7603_mac_sta_poll(struct mt7603_dev *dev)
{
	static const u8 ac_to_tid[4] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7603_sta *msta;
	u32 total_airtime = 0;
	u32 airtime[4];
	u32 addr;
	int i;

	rcu_read_lock();

	while (1) {
		bool clear = false;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&dev->sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}

		msta = list_first_entry(&dev->sta_poll_list, struct mt7603_sta,
					poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		addr = mt7603_wtbl4_addr(msta->wcid.idx);
		for (i = 0; i < 4; i++) {
			u32 airtime_last = msta->tx_airtime_ac[i];

			msta->tx_airtime_ac[i] = mt76_rr(dev, addr + i * 8);
			airtime[i] = msta->tx_airtime_ac[i] - airtime_last;
			airtime[i] *= 32;
			total_airtime += airtime[i];

			if (msta->tx_airtime_ac[i] & BIT(22))
				clear = true;
		}

		if (clear) {
			mt7603_wtbl_update(dev, msta->wcid.idx,
					   MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->tx_airtime_ac, 0,
			       sizeof(msta->tx_airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		for (i = 0; i < 4; i++) {
			struct mt76_queue *q = dev->mt76.q_tx[i].q;
			u8 qidx = q->hw_idx;
			u8 tid = ac_to_tid[i];
			u32 txtime = airtime[qidx];

			if (!txtime)
				continue;

			ieee80211_sta_register_airtime(sta, tid, txtime, 0);
		}
	}

	rcu_read_unlock();

	if (!total_airtime)
		return;

	spin_lock_bh(&dev->mt76.cc_lock);
	dev->mphy.chan_state->cc_tx += total_airtime;
	spin_unlock_bh(&dev->mt76.cc_lock);
}

static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

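/*
 * Parse the rx descriptor (and its optional groups) into mt76_rx_status:
 * wcid/channel lookup, decryption and A-MPDU flags, plus the rate and
 * RSSI info from the group 3 rx vector. Returns -EINVAL if the
 * descriptor is truncated or inconsistent.
 */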
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mphy.sband_5g.sband : &dev->mphy.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	if (!(rxd2 & (MT_RXD2_NORMAL_NON_AMPDU_SUB |
		      MT_RXD2_NORMAL_NON_AMPDU))) {
		status->flag |= RX_FLAG_AMPDU_DETAILS;

		/* all subframes of an A-MPDU have the same timestamp */
		if (dev->rx_ampdu_ts != rxd[12]) {
			if (!++dev->ampdu_ref)
				dev->ampdu_ref++;
		}
		dev->rx_ampdu_ts = rxd[12];

		status->ampdu_ref = dev->ampdu_ref;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt76_get_rate(&dev->mt76, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mphy.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt76_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));

	return 0;
}

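/*
 * Encode an ieee80211_tx_rate into the hardware rate value (rate index,
 * PHY mode, STBC flag) and report the 20/40 MHz bandwidth selection
 * through *bw.
 */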
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mphy.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

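/*
 * Program a new rate set (plus optional probe rate) into the WTBL rate
 * update registers. Two rate sets are kept per station; bit 0 of the TSF
 * snapshot stored in rate_set_tsf records which one is current, so tx
 * status events can later be matched against the set that was active
 * when the frame was queued.
 */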
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	struct ieee80211_tx_rate *ref;
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	bool rateset;
	int i, k;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	rateset = !(sta->rate_set_tsf & BIT(0));
	memcpy(sta->rateset[rateset].rates, rates,
	       sizeof(sta->rateset[rateset].rates));
	if (probe_rate) {
		sta->rateset[rateset].probe_rate = *probe_rate;
		ref = &sta->rateset[rateset].probe_rate;
	} else {
		sta->rateset[rateset].probe_rate.idx = -1;
		ref = &sta->rateset[rateset].rates[0];
	}

	rates = sta->rateset[rateset].rates;
	for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
		/*
		 * We don't support switching between short and long GI
		 * within the rate set. For accurate tx status reporting, we
		 * need to make sure that flags match.
		 * For improved performance, avoid duplicate entries by
		 * decrementing the MCS index if necessary
		 */
		if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
			rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;

		for (k = 0; k < i; k++) {
			if (rates[i].idx != rates[k].idx)
				continue;
			if ((rates[i].flags ^ rates[k].flags) &
			    IEEE80211_TX_RC_40_MHZ_WIDTH)
				continue;

			if (!rates[i].idx)
				continue;

			rates[i].idx--;
		}
	}

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
	sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}

static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

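/*
 * Install (or remove, when key == NULL) a key in the WTBL3 segment of
 * the entry and flag the cipher type in WTBL1.
 */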
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}

static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, enum mt76_txq_id qid,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_queue *q = dev->mt76.q_tx[qid].q;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u16 seqno = 0;
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		if (vif_idx && qid >= MT_TXQ_BEACON)
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (qid >= MT_TXQ_BEACON)
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      MT_TXD3_SN_VALID;

	if (ieee80211_is_data_qos(hdr->frame_control))
		seqno = le16_to_cpu(hdr->seq_ctrl);
	else if (ieee80211_is_back_req(hdr->frame_control))
		seqno = le16_to_cpu(bar->start_seq_num);
	else
		val &= ~MT_TXD3_SN_VALID;

	val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);

	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

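/*
 * mt76 tx path hook: resolve the wcid, cancel powersave filtering when
 * mac80211 asks for it, allocate a tx status PID and fill the txwi.
 */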
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		msta->rate_probe = true;
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
			      sta, pid, key);

	return 0;
}

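/*
 * Translate a tx status event back into mac80211 rate/retry info by
 * replaying the retry chain against the rate set that was active when
 * the frame was queued (matched via the timestamp in the status and
 * rate_set_tsf). Returns false if the status should be discarded.
 */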
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	struct mt7603_rate_set *rs;
	int first_idx = 0, last_idx;
	u32 rate_set_tsf;
	u32 final_rate;
	u32 final_rate_flags;
	bool rs_idx;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
	last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
	rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
			 rate_set_tsf) < 1000000);
	rs_idx ^= rate_set_tsf & BIT(0);
	rs = &sta->rateset[rs_idx];

	if (!first_idx && rs->probe_rate.idx >= 0) {
		info->status.rates[0] = rs->probe_rate;

		spin_lock_bh(&dev->mt76.lock);
		if (sta->rate_probe) {
			mt7603_wtbl_set_rates(dev, sta, NULL,
					      sta->rates);
			sta->rate_probe = false;
		}
		spin_unlock_bh(&dev->mt76.lock);
	} else {
		info->status.rates[0] = rs->rates[first_idx / 2];
	}
	info->status.rates[0].count = 0;

	for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
		struct ieee80211_tx_rate *cur_rate;
		int cur_count;

		cur_rate = &rs->rates[idx / 2];
		cur_count = min_t(int, MT7603_RATE_RETRY, count);
		count -= cur_count;

		if (idx && (cur_rate->idx != info->status.rates[i].idx ||
			    cur_rate->flags != info->status.rates[i].flags)) {
			i++;
			if (i == ARRAY_SIZE(info->status.rates)) {
				i--;
				break;
			}

			info->status.rates[i] = *cur_rate;
			info->status.rates[i].count = 0;
		}

		info->status.rates[i].count += cur_count;
	}

out:
	final_rate_flags = info->status.rates[i].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mphy.sband_5g.sband;
		else
			sband = &dev->mphy.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
					   cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (final_rate > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[i].idx = final_rate;
	info->status.rates[i].flags = final_rate_flags;

	return true;
}

static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	trace_mac_txdone(mdev, sta->wcid.idx, pid);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7603_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[4]);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	txs = le32_to_cpu(txs_data[3]);
	wcidx = FIELD_GET(MT_TXS3_WCID, txs);

	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7603_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (list_empty(&msta->poll_list)) {
		spin_lock_bh(&dev->sta_poll_lock);
		list_add_tail(&msta->poll_list, &dev->sta_poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);
	}

	if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	if (wcidx >= MT7603_WTBL_STA || !sta)
		goto out;

	if (mt7603_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}

void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
			    struct mt76_queue_entry *e)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct sk_buff *skb = e->skb;

	if (!e->txwi) {
		dev_kfree_skb_any(skb);
		return;
	}

	dev->tx_hang_check = 0;
	mt76_tx_complete_skb(mdev, skb);
}

static bool
wait_for_wpdma(struct mt7603_dev *dev)
{
	return mt76_poll(dev, MT_WPDMA_GLO_CFG,
			 MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
			 MT_WPDMA_GLO_CFG_RX_DMA_BUSY,
			 0, 1000);
}

static void mt7603_pse_reset(struct mt7603_dev *dev)
{
	/* Clear previous reset result */
	if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S);

	/* Reset PSE */
	mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);

	if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET,
			    MT_MCU_DEBUG_RESET_PSE_S,
			    MT_MCU_DEBUG_RESET_PSE_S, 500)) {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED]++;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE);
	} else {
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
		mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES);
	}

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3)
		dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0;
}

void mt7603_mac_dma_start(struct mt7603_dev *dev)
{
	mt7603_mac_start(dev);

	wait_for_wpdma(dev);
	usleep_range(50, 100);

	mt76_set(dev, MT_WPDMA_GLO_CFG,
		 (MT_WPDMA_GLO_CFG_TX_DMA_EN |
		  MT_WPDMA_GLO_CFG_RX_DMA_EN |
		  FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
		  MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE));

	mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
}

void mt7603_mac_start(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0);
	mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_mac_stop(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	mt76_wr(dev, MT_WF_ARB_TX_START_0, 0);
	mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START);
}

void mt7603_pse_client_reset(struct mt7603_dev *dev)
{
	u32 addr;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR +
			      MT_CLIENT_RESET_TX);

	/* Clear previous reset state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2 |
		   MT_CLIENT_RESET_TX_R_E_1_S |
		   MT_CLIENT_RESET_TX_R_E_2_S);

	/* Start PSE client TX abort */
	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
		       MT_CLIENT_RESET_TX_R_E_1_S, 500);

	mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2);
	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);

	/* Wait for PSE client to clear TX FIFO */
	mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S,
		       MT_CLIENT_RESET_TX_R_E_2_S, 500);

	/* Clear PSE client TX abort state */
	mt76_clear(dev, addr,
		   MT_CLIENT_RESET_TX_R_E_1 |
		   MT_CLIENT_RESET_TX_R_E_2);
}

static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
{
	if (!is_mt7628(dev))
		return;

	mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET);
	mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET);
}

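/*
 * Full recovery path for the watchdog: stop the MAC and DMA, reset the
 * PSE and DMA scheduler as needed, flush all queues, then bring
 * everything back up and restart beaconing.
 */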
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
	int beacon_int = dev->mt76.beacon_int;
	u32 mask = dev->mt76.mmio.irqmask;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);

	tasklet_disable(&dev->mt76.tx_tasklet);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7603_beacon_set_timer(dev, -1, 0);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] ||
	    dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY ||
	    dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK ||
	    dev->cur_reset_cause == RESET_CAUSE_TX_HANG)
		mt7603_pse_reset(dev);

	if (dev->reset_cause[RESET_CAUSE_RESET_FAILED])
		goto skip_dma_reset;

	mt7603_mac_stop(dev);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
	usleep_range(1000, 2000);

	mt7603_irq_disable(dev, mask);

	mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);

	mt7603_pse_client_reset(dev);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
		mt76_queue_rx_reset(dev, i);

	mt7603_dma_sched_reset(dev);

	mt7603_mac_dma_start(dev);

	mt7603_irq_enable(dev, mask);

skip_dma_reset:
	clear_bit(MT76_RESET, &dev->mphy.state);
	mutex_unlock(&dev->mt76.mutex);

	tasklet_enable(&dev->mt76.tx_tasklet);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	mt7603_beacon_set_timer(dev, -1, beacon_int);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	ieee80211_wake_queues(dev->mt76.hw);
	mt76_txq_schedule_all(&dev->mphy);
}

static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index)
{
	u32 val;

	mt76_wr(dev, MT_WPDMA_DEBUG,
		FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) |
		MT_WPDMA_DEBUG_SEL);

	val = mt76_rr(dev, MT_WPDMA_DEBUG);
	return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val);
}

static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev)
{
	if (is_mt7628(dev))
		return mt7603_dma_debug(dev, 9) & BIT(9);

	return mt7603_dma_debug(dev, 2) & BIT(8);
}

static bool mt7603_rx_dma_busy(struct mt7603_dev *dev)
{
	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY))
		return false;

	return mt7603_rx_fifo_busy(dev);
}

static bool mt7603_tx_dma_busy(struct mt7603_dev *dev)
{
	u32 val;

	if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY))
		return false;

	val = mt7603_dma_debug(dev, 9);
	return (val & BIT(8)) && (val & 0xf) != 0xf;
}

static bool mt7603_tx_hang(struct mt7603_dev *dev)
{
	struct mt76_queue *q;
	u32 dma_idx, prev_dma_idx;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i].q;

		if (!q->queued)
			continue;

		prev_dma_idx = dev->tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->tx_dma_idx[i] = dma_idx;

		/* queue has work pending but the hw index did not move */
		if (dma_idx == prev_dma_idx &&
		    dma_idx != readl(&q->regs->cpu_idx))
			break;
	}

	return i < 4;
}

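/*
 * Consider the PSE rx path stuck when the client status reports busy
 * while the rx FIFO itself has already drained; the status bit patterns
 * checked below appear to be vendor-defined.
 */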
static bool mt7603_rx_pse_busy(struct mt7603_dev *dev)
{
	u32 addr, val;

	if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES)
		return true;

	if (mt7603_rx_fifo_busy(dev))
		return false;

	addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS);
	mt76_wr(dev, addr, 3);
	val = mt76_rr(dev, addr) >> 16;

	if (is_mt7628(dev) && (val & 0x4001) == 0x4001)
		return true;

	return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001;
}

static bool
mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter,
		      enum mt7603_reset_cause cause,
		      bool (*check)(struct mt7603_dev *dev))
{
	if (dev->reset_test == cause + 1) {
		dev->reset_test = 0;
		goto trigger;
	}

	if (check) {
		if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) {
			*counter = 0;
			return false;
		}

		(*counter)++;
	}

	if (*counter < MT7603_WATCHDOG_TIMEOUT)
		return false;
trigger:
	dev->cur_reset_cause = cause;
	dev->reset_cause[cause]++;

	return true;
}

void mt7603_update_channel(struct mt76_dev *mdev)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt76_channel_state *state;

	state = mdev->phy.chan_state;
	state->cc_busy += mt76_rr(dev, MT_MIB_STAT_CCA);
}

static void
mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val)
{
	u32 rxtd_6 = 0xd7c80000;

	if (val == dev->ed_strict_mode)
		return;

	dev->ed_strict_mode = val;

	/* Ensure that ED/CCA does not trigger if disabled */
	if (!dev->ed_monitor)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d);

	if (dev->ed_monitor && !dev->ed_strict_mode)
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f);
	else
		rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10);

	mt76_wr(dev, MT_RXTD(6), rxtd_6);

	mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN,
		       dev->ed_monitor && !dev->ed_strict_mode);
}

static void
mt7603_edcca_check(struct mt7603_dev *dev)
{
	u32 val = mt76_rr(dev, MT_AGC(41));
	ktime_t cur_time;
	int rssi0, rssi1;
	u32 active;
	u32 ed_busy;

	if (!dev->ed_monitor)
		return;

	rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val);
	if (rssi0 > 128)
		rssi0 -= 256;

	rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val);
	if (rssi1 > 128)
		rssi1 -= 256;

	if (max(rssi0, rssi1) >= -40 &&
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH)
		dev->ed_strong_signal++;
	else if (dev->ed_strong_signal > 0)
		dev->ed_strong_signal--;

	cur_time = ktime_get_boottime();
	ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK;

	active = ktime_to_us(ktime_sub(cur_time, dev->ed_time));
	dev->ed_time = cur_time;

	if (!active)
		return;

	if (100 * ed_busy / active > 90) {
		if (dev->ed_trigger < 0)
			dev->ed_trigger = 0;
		dev->ed_trigger++;
	} else {
		if (dev->ed_trigger > 0)
			dev->ed_trigger = 0;
		dev->ed_trigger--;
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH ||
	    dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) {
		mt7603_edcca_set_strict(dev, true);
	} else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) {
		mt7603_edcca_set_strict(dev, false);
	}

	if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = MT7603_EDCCA_BLOCK_TH;
	else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH)
		dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH;
}

void mt7603_cca_stats_reset(struct mt7603_dev *dev)
{
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET);
	mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN);
}

static void
mt7603_adjust_sensitivity(struct mt7603_dev *dev)
{
	u32 agc0 = dev->agc0, agc3 = dev->agc3;
	u32 adj;

	if (!dev->sensitivity || dev->sensitivity < -100) {
		dev->sensitivity = 0;
	} else if (dev->sensitivity <= -84) {
		adj = 7 + (dev->sensitivity + 92) / 2;

		agc0 = 0x56f0076f;
		agc0 |= adj << 12;
		agc0 |= adj << 16;
		agc3 = 0x81d0d5e3;
	} else if (dev->sensitivity <= -72) {
		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x6af0006f;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x8181d5e3;
	} else {
		if (dev->sensitivity > -54)
			dev->sensitivity = -54;

		adj = 7 + (dev->sensitivity + 80) / 2;

		agc0 = 0x7ff0000f;
		agc0 |= adj << 4;
		agc0 |= adj << 8;
		agc0 |= adj << 12;
		agc0 |= adj << 16;

		agc3 = 0x818181e3;
	}

	mt76_wr(dev, MT_AGC(0), agc0);
	mt76_wr(dev, MT_AGC1(0), agc0);

	mt76_wr(dev, MT_AGC(3), agc3);
	mt76_wr(dev, MT_AGC1(3), agc3);
}

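/*
 * Dynamic sensitivity tuning: derive the false CCA count from the PD vs
 * MDRDY counters, then step the sensitivity threshold up or down and cap
 * it against the weakest currently connected station.
 */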
static void
mt7603_false_cca_check(struct mt7603_dev *dev)
{
	int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm;
	int false_cca;
	int min_signal;
	u32 val;

	if (!dev->dynamic_sensitivity)
		return;

	val = mt76_rr(dev, MT_PHYCTRL_STAT_PD);
	pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val);
	pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val);

	val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY);
	mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val);
	mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val);

	dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm;
	dev->false_cca_cck = pd_cck - mdrdy_cck;

	mt7603_cca_stats_reset(dev);

	min_signal = mt76_get_min_avg_rssi(&dev->mt76, false);
	if (!min_signal) {
		dev->sensitivity = 0;
		dev->last_cca_adj = jiffies;
		goto out;
	}

	min_signal -= 15;

	false_cca = dev->false_cca_ofdm + dev->false_cca_cck;
	if (false_cca > 600 &&
	    dev->sensitivity < -100 + dev->sensitivity_limit) {
		if (!dev->sensitivity)
			dev->sensitivity = -92;
		else
			dev->sensitivity += 2;
		dev->last_cca_adj = jiffies;
	} else if (false_cca < 100 ||
		   time_after(jiffies, dev->last_cca_adj + 10 * HZ)) {
		dev->last_cca_adj = jiffies;
		if (!dev->sensitivity)
			goto out;

		dev->sensitivity -= 2;
	}

	if (dev->sensitivity && dev->sensitivity > min_signal) {
		dev->sensitivity = min_signal;
		dev->last_cca_adj = jiffies;
	}

out:
	mt7603_adjust_sensitivity(dev);
}

void mt7603_mac_work(struct work_struct *work)
{
	struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
					      mt76.mac_work.work);
	bool reset = false;
	int i, idx;

	mt76_tx_status_check(&dev->mt76, NULL, false);

	mutex_lock(&dev->mt76.mutex);

	dev->mac_work_count++;
	mt76_update_survey(&dev->mt76);
	mt7603_edcca_check(dev);

	for (i = 0, idx = 0; i < 2; i++) {
		u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));

		dev->mt76.aggr_stats[idx++] += val & 0xffff;
		dev->mt76.aggr_stats[idx++] += val >> 16;
	}

	if (dev->mac_work_count == 10)
		mt7603_false_cca_check(dev);

	if (mt7603_watchdog_check(dev, &dev->rx_pse_check,
				  RESET_CAUSE_RX_PSE_BUSY,
				  mt7603_rx_pse_busy) ||
	    mt7603_watchdog_check(dev, &dev->beacon_check,
				  RESET_CAUSE_BEACON_STUCK,
				  NULL) ||
	    mt7603_watchdog_check(dev, &dev->tx_hang_check,
				  RESET_CAUSE_TX_HANG,
				  mt7603_tx_hang) ||
	    mt7603_watchdog_check(dev, &dev->tx_dma_check,
				  RESET_CAUSE_TX_BUSY,
				  mt7603_tx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->rx_dma_check,
				  RESET_CAUSE_RX_BUSY,
				  mt7603_rx_dma_busy) ||
	    mt7603_watchdog_check(dev, &dev->mcu_hang,
				  RESET_CAUSE_MCU_HANG,
				  NULL) ||
	    dev->reset_cause[RESET_CAUSE_RESET_FAILED]) {
		dev->beacon_check = 0;
		dev->tx_dma_check = 0;
		dev->tx_hang_check = 0;
		dev->rx_dma_check = 0;
		dev->rx_pse_check = 0;
		dev->rx_dma_idx = ~0;
		memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx));
		reset = true;
		dev->mac_work_count = 0;
	}

	if (dev->mac_work_count >= 10)
		dev->mac_work_count = 0;

	mutex_unlock(&dev->mt76.mutex);

	if (reset)
		mt7603_mac_watchdog_reset(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}