// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);

	return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}
	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}
	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->deflink.ht_cap.ampdu_density;
	}
	if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (sta->deflink.vht_cap.vht_supported) {
		agg_size = sta->deflink.vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->deflink.ht_cap.ht_supported) {
		agg_size = sta->deflink.ht_cap.ampdu_factor;
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (sta->deflink.he_cap.has_he)
		agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
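
	/*
	 * Note: agg_size is an exponent, the resulting maximum A-MPDU length
	 * is 2^(13 + agg_size) - 1 bytes. A VHT exponent of 7 thus allows
	 * almost 1 MiB, and the 4 MiB firmware cap below corresponds to an
	 * exponent of 9.
	 */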
	/* Limit to max A-MPDU supported by FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
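
		/*
		 * Note: one nibble of uapsd_acs carries the trigger-enabled
		 * ACs and the other the delivery-enabled ACs; U-APSD ACs here
		 * are both, hence the duplication above. max_sp is the WMM
		 * "Max SP Length" field (1-3 meaning 2/4/6 frames), with 128
		 * standing in for "all buffered frames" when max_sp is 0.
		 */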
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}
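
	/*
	 * Note: rather than rearming this timer on every received frame, the
	 * RX path only refreshes last_rx; the timer is scheduled two timeout
	 * intervals out and re-arms itself above while traffic keeps the
	 * session alive.
	 */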
	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}
/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.queue = cpu_to_le32(queue),
			};

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];
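
	/*
	 * Example: a STA whose only DATA queue is on VI and that now needs a
	 * BK queue has no BE, same-AC or BK candidate, so priority 4a shares
	 * the existing VI queue.
	 */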
	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}
/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does that
 * in such a case, otherwise - if no redirection required - it does nothing,
 * unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
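	/* i.e. IEEE80211_AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 */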
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}
static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/* this queue isn't used for traffic (cab_queue) */
		if (IS_ERR_OR_NULL(sta)) {
			size = IWL_MGMT_QUEUE_SIZE;
		} else if (sta->deflink.he_cap.has_he) {
			/* support for 256 ba size */
			size = IWL_DEFAULT_QUEUE_SIZE_HE;
		} else {
			size = IWL_DEFAULT_QUEUE_SIZE;
		}

		rcu_read_unlock();
	}

	/* take the min with bc tbl entries allowed */
	size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
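
	/*
	 * Note: bc_tbl_size is in bytes and each queue entry needs one __le16
	 * byte-count table entry, so this caps the queue at the number of
	 * entries the byte-count table can describe.
	 */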
	/* size needs to be power of 2 values for calculating read/write pointers */
	size = rounddown_pow_of_two(size);
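
	/*
	 * The transport may not have enough DMA memory for the requested
	 * size; the loop below retries with progressively smaller
	 * power-of-two sizes, giving up below 16 entries.
	 */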
	do {
		queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
					    tid, size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
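		/*
		 * Note: the SN occupies bits 4-15 of the sequence control
		 * field, so adding 0x10 to seq_number advances the SN by
		 * exactly one, matching the incremented ssn.
		 */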
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);

	return ret;
}
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->trans_cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status))
			return -EINVAL;

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->cfg.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}
int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}
static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, queue,
					    IWL_MAX_TID_COUNT);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for aux activity;
	 * the mac_id argument is reused to carry lmac_id to the function
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}
int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}
void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 * @bsta: the broadcast station to add.
 */
2174 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2176 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2177 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2178 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2179 const u8 *baddr = _baddr;
2182 unsigned int wdg_timeout =
2183 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2184 struct iwl_trans_txq_scd_cfg cfg = {
2185 .fifo = IWL_MVM_TX_FIFO_VO,
2186 .sta_id = mvmvif->bcast_sta.sta_id,
2187 .tid = IWL_MAX_TID_COUNT,
2189 .frame_limit = IWL_FRAME_LIMIT,
2192 lockdep_assert_held(&mvm->mutex);
2194 if (!iwl_mvm_has_new_tx_api(mvm)) {
2195 if (vif->type == NL80211_IFTYPE_AP ||
2196 vif->type == NL80211_IFTYPE_ADHOC) {
2197 queue = mvm->probe_queue;
2198 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2199 queue = mvm->p2p_dev_queue;
2201 WARN(1, "Missing required TXQ for adding bcast STA\n");
2205 bsta->tfd_queue_msk |= BIT(queue);
2207 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2210 if (vif->type == NL80211_IFTYPE_ADHOC)
2211 baddr = vif->bss_conf.bssid;
2213 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2214 	return -ENOSPC;
2216 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2217 mvmvif->id, mvmvif->color);
2222 * For 22000 firmware and onward we cannot add a queue to a station
2223 * unknown to the firmware, so enable the queue here - after the station was added
2225 if (iwl_mvm_has_new_tx_api(mvm)) {
2226 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2227 				IWL_MAX_TID_COUNT,
2228 				wdg_timeout);
2229 if (queue < 0) {
2230 	iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2231 	return queue;
2232 }
2234 if (vif->type == NL80211_IFTYPE_AP ||
2235 vif->type == NL80211_IFTYPE_ADHOC)
2236 mvm->probe_queue = queue;
2237 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2238 mvm->p2p_dev_queue = queue;
2244 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2245 struct ieee80211_vif *vif)
2247 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2248 u16 *queueptr, queue;
2250 lockdep_assert_held(&mvm->mutex);
2252 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2254 switch (vif->type) {
2255 case NL80211_IFTYPE_AP:
2256 case NL80211_IFTYPE_ADHOC:
2257 queueptr = &mvm->probe_queue;
2259 case NL80211_IFTYPE_P2P_DEVICE:
2260 queueptr = &mvm->p2p_dev_queue;
2263 WARN(1, "Can't free bcast queue on vif type %d\n",
2264      vif->type);
2265 return;
2268 queue = *queueptr;
2269 iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
2270 if (iwl_mvm_has_new_tx_api(mvm))
2271 	return;
2273 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2274 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2277 /* Send the FW a request to remove the station from its internal data
2278 * structures, but DO NOT remove the entry from the local data structures. */
2279 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2281 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2284 lockdep_assert_held(&mvm->mutex);
2286 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2288 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2289 if (ret)
2290 	IWL_WARN(mvm, "Failed sending remove station\n");
2294 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2296 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2298 lockdep_assert_held(&mvm->mutex);
2300 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2301 ieee80211_vif_type_p2p(vif),
2302 IWL_STA_GENERAL_PURPOSE);
2305 /* Allocate a new station entry for the broadcast station of the given vif,
2306 * and send it to the FW.
2307 * Note that each P2P mac should have its own broadcast station.
2309 * @mvm: the mvm component
2310 * @vif: the interface to which the broadcast station is added
2311 * (the broadcast station itself is taken from the vif's mvm data) */
2312 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2314 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2315 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2318 lockdep_assert_held(&mvm->mutex);
2320 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2321 if (ret)
2322 	return ret;
2324 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2325 if (ret)
2326 	iwl_mvm_dealloc_int_sta(mvm, bsta);
2328 return ret;
2332 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2334 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2336 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2340 * Send the FW a request to remove the station from its internal data
2341 * structures, and in addition remove it from the local data structure.
2343 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2347 lockdep_assert_held(&mvm->mutex);
2349 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2351 iwl_mvm_dealloc_bcast_sta(mvm, vif);
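/*
 * A minimal usage sketch of the bcast-station helper pairs above (error
 * handling trimmed; iwl_mvm_add_p2p_bcast_sta() and
 * iwl_mvm_rm_p2p_bcast_sta() are exactly these compositions):
 */
#if 0
	/* add: allocate the local entry, then push it to the FW */
	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (!ret)
		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	/* remove: drop it from the FW, then free the local entry */
	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
	iwl_mvm_dealloc_bcast_sta(mvm, vif);
#endif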
2357 * Allocate a new station entry for the multicast station to the given vif,
2358 * and send it to the FW.
2359 * Note that each AP/GO mac should have its own multicast station.
2361 * @mvm: the mvm component
2362 * @vif: the interface to which the multicast station is added
2364 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2366 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2367 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2368 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2369 const u8 *maddr = _maddr;
2370 struct iwl_trans_txq_scd_cfg cfg = {
2371 .fifo = vif->type == NL80211_IFTYPE_AP ?
2372 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2373 .sta_id = msta->sta_id,
2376 .frame_limit = IWL_FRAME_LIMIT,
2378 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2381 lockdep_assert_held(&mvm->mutex);
2383 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2384 vif->type != NL80211_IFTYPE_ADHOC))
2388 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2389 * invalid, so make sure we use the queue we want.
2390 * Note that this is done here as we want to avoid making DQA
2391 * changes in the mac80211 layer.
2393 if (vif->type == NL80211_IFTYPE_ADHOC)
2394 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2397 * While in previous FWs we had to exclude the cab queue from the
2398 * TFD queue mask, now it is needed like any other queue.
2400 if (!iwl_mvm_has_new_tx_api(mvm) &&
2401 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2402 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2404 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2406 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2407 mvmvif->id, mvmvif->color);
2412 * Enable cab queue after the ADD_STA command is sent.
2413 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2414 * command with unknown station id, and for FW that doesn't support
2415 * station API since the cab queue is not included in the tfd_queue_mask.
2418 if (iwl_mvm_has_new_tx_api(mvm)) {
2419 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2420 				    0, timeout);
2421 if (queue < 0) {
2422 	ret = queue;
2423 	goto err;
2424 }
2426 mvmvif->cab_queue = queue;
2427 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2428 IWL_UCODE_TLV_API_STA_TYPE))
2429 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2434 iwl_mvm_dealloc_int_sta(mvm, msta);
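/*
 * Summary of the cab queue orderings implemented above:
 *  1. new TX API (22000+): send ADD_STA first, then allocate the queue
 *     via iwl_mvm_tvqm_enable_txq() (FW rejects queues for unknown
 *     stations);
 *  2. older FW with the STA_TYPE API: enable the cab queue before
 *     ADD_STA and include it in the TFD queue mask;
 *  3. older FW without STA_TYPE: enable the cab queue only after
 *     ADD_STA.
 */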
2438 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2439 struct ieee80211_key_conf *keyconf,
2442 union {
2443 	struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2444 	struct iwl_mvm_add_sta_key_cmd cmd;
2445 } u = {};
2446 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2447 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2452 /* This is a valid situation for GTK removal */
2453 if (sta_id == IWL_MVM_INVALID_STA)
2454 	return 0;
2456 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2457 STA_KEY_FLG_KEYID_MSK);
2458 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2459 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2462 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2465 * The fields assigned here are in the same location at the start
2466 * of the command, so we can do this union trick.
2468 u.cmd.common.key_flags = key_flags;
2469 u.cmd.common.key_offset = keyconf->hw_key_idx;
2470 u.cmd.common.sta_id = sta_id;
2472 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2474 status = ADD_STA_SUCCESS;
2475 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2479 case ADD_STA_SUCCESS:
2480 	IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2481 	break;
2482 default:
2483 	ret = -EIO;
2484 	IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2485 	break;
2492 * Send the FW a request to remove the station from it's internal data
2493 * structures, and in addition remove it from the local data structure.
2495 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2497 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2500 lockdep_assert_held(&mvm->mutex);
2502 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2504 iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
2506 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2508 IWL_WARN(mvm, "Failed sending remove station\n");
2513 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2515 struct iwl_mvm_delba_data notif = {
2516 	.baid = baid,
2517 };
2519 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2520 				&notif, sizeof(notif));
2523 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2524 struct iwl_mvm_baid_data *data)
2528 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2530 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2532 struct iwl_mvm_reorder_buffer *reorder_buf =
2533 &data->reorder_buf[i];
2534 struct iwl_mvm_reorder_buf_entry *entries =
2535 &data->entries[i * data->entries_per_queue];
2537 spin_lock_bh(&reorder_buf->lock);
2538 if (likely(!reorder_buf->num_stored)) {
2539 	spin_unlock_bh(&reorder_buf->lock);
2540 	continue;
2541 }
2544 * This shouldn't happen in regular DELBA since the internal
2545 * delBA notification should trigger a release of all frames in
2546 * the reorder buffer.
2550 for (j = 0; j < reorder_buf->buf_size; j++)
2551 __skb_queue_purge(&entries[j].e.frames);
2553 * Prevent timer re-arm. This prevents a very far-fetched case
2554 * where we timed out on the notification. There may be prior
2555 * RX frames pending in the RX queue before the notification
2556 * that might get processed between now and the actual deletion
2557 * and we would re-arm the timer although we are deleting the reorder buffer.
2560 reorder_buf->removed = true;
2561 spin_unlock_bh(&reorder_buf->lock);
2562 del_timer_sync(&reorder_buf->reorder_timer);
2566 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2567 struct iwl_mvm_baid_data *data,
2568 u16 ssn, u16 buf_size)
2572 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2573 struct iwl_mvm_reorder_buffer *reorder_buf =
2574 &data->reorder_buf[i];
2575 struct iwl_mvm_reorder_buf_entry *entries =
2576 &data->entries[i * data->entries_per_queue];
2579 reorder_buf->num_stored = 0;
2580 reorder_buf->head_sn = ssn;
2581 reorder_buf->buf_size = buf_size;
2582 /* rx reorder timer */
2583 timer_setup(&reorder_buf->reorder_timer,
2584 iwl_mvm_reorder_timer_expired, 0);
2585 spin_lock_init(&reorder_buf->lock);
2586 reorder_buf->mvm = mvm;
2587 reorder_buf->queue = i;
2588 reorder_buf->valid = false;
2589 for (j = 0; j < reorder_buf->buf_size; j++)
2590 __skb_queue_head_init(&entries[j].e.frames);
2594 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2595 struct iwl_mvm_sta *mvm_sta,
2596 bool start, int tid, u16 ssn,
2599 struct iwl_mvm_add_sta_cmd cmd = {
2600 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2601 .sta_id = mvm_sta->sta_id,
2602 .add_modify = STA_MODE_MODIFY,
2608 cmd.add_immediate_ba_tid = tid;
2609 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2610 cmd.rx_ba_window = cpu_to_le16(buf_size);
2611 cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2613 cmd.remove_immediate_ba_tid = tid;
2614 cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2617 status = ADD_STA_SUCCESS;
2618 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2619 iwl_mvm_add_sta_cmd_size(mvm),
2624 switch (status & IWL_ADD_STA_STATUS_MASK) {
2625 case ADD_STA_SUCCESS:
2626 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2627 start ? "start" : "stopp");
2628 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2629 	    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2630 	return -EINVAL;
2631 return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2632 case ADD_STA_IMMEDIATE_BA_FAILURE:
2633 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2634 return -ENOSPC;
2635 default:
2636 	IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2637 		start ? "start" : "stopp", status);
2638 	return -EIO;
2642 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2643 struct iwl_mvm_sta *mvm_sta,
2644 bool start, int tid, u16 ssn,
2645 u16 buf_size, int baid)
2647 struct iwl_rx_baid_cfg_cmd cmd = {
2648 .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2649 cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2651 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2654 BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2657 cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2658 cmd.alloc.tid = tid;
2659 cmd.alloc.ssn = cpu_to_le16(ssn);
2660 cmd.alloc.win_size = cpu_to_le16(buf_size);
2662 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2663 cmd.remove_v1.baid = cpu_to_le32(baid);
2664 BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2666 cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2667 cmd.remove.tid = cpu_to_le32(tid);
2670 ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2676 /* ignore firmware baid on remove */
2680 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2681 start ? "start" : "stopp");
2683 if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2684 	return -EINVAL;
2686 return baid;
2689 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2690 bool start, int tid, u16 ssn, u16 buf_size,
2693 if (fw_has_capa(&mvm->fw->ucode_capa,
2694 IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2695 return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
2696 tid, ssn, buf_size, baid);
2698 return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
2699 tid, ssn, buf_size);
2702 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2703 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2705 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2706 struct iwl_mvm_baid_data *baid_data = NULL;
2708 u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2709 						       IWL_MAX_BAID_OLD;
2711 lockdep_assert_held(&mvm->mutex);
2713 if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2714 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2715 return -ENOSPC;
2718 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2719 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2721 /* sparse doesn't like the __align() so don't check */
2724 * The division below will be OK if either the cache line size
2725 * can be divided by the entry size (ALIGN will round up) or if
2726 * the entry size can be divided by the cache line size, in
2727 * which case the ALIGN() will do nothing.
2729 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2730 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2734 * Upward align the reorder buffer size to fill an entire cache
2735 * line for each queue, to avoid sharing cache lines between
2738 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2741 * Allocate here so if allocation fails we can bail out early
2742 * before starting the BA session in the firmware
2744 baid_data = kzalloc(sizeof(*baid_data) +
2745 		    mvm->trans->num_rx_queues * reorder_buf_size,
2746 		    GFP_KERNEL);
2747 if (!baid_data)
2748 	return -ENOMEM;
2752 * This division is why we need the above BUILD_BUG_ON(),
2753 * if that doesn't hold then this will not be right.
2755 baid_data->entries_per_queue =
2756 reorder_buf_size / sizeof(baid_data->entries[0]);
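/*
 * A worked example of the sizing above, with illustrative numbers (actual
 * sizes depend on the platform): if SMP_CACHE_BYTES == 64 and
 * sizeof(entries[0]) == 16, a BA window of buf_size == 25 gives
 * 25 * 16 = 400 bytes, ALIGN() rounds that up to 448, and
 * entries_per_queue becomes 448 / 16 = 28, so no cache line is shared
 * between two RX queues.
 */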
2759 if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2760 baid = mvm_sta->tid_to_baid[tid];
2762 /* we don't really need it in this case */
2766 /* Don't send command to remove (start=0) BAID during restart */
2767 if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2768 baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
2769 			  baid);
2777 mvm->rx_ba_sessions++;
2779 if (!iwl_mvm_has_new_rx_api(mvm))
2780 	return 0;
2782 baid_data->baid = baid;
2783 baid_data->timeout = timeout;
2784 baid_data->last_rx = jiffies;
2785 baid_data->rcu_ptr = &mvm->baid_map[baid];
2786 timer_setup(&baid_data->session_timer,
2787 iwl_mvm_rx_agg_session_expired, 0);
2788 baid_data->mvm = mvm;
2789 baid_data->tid = tid;
2790 baid_data->sta_id = mvm_sta->sta_id;
2792 mvm_sta->tid_to_baid[tid] = baid;
2793 if (timeout)
2794 	mod_timer(&baid_data->session_timer,
2795 		  TU_TO_EXP_TIME(timeout * 2));
2797 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2799 * protect the BA data with RCU to cover a case where our
2800 * internal RX sync mechanism will timeout (not that it's
2801 * supposed to happen) and we will free the session data while
2802 * RX is being processed in parallel
2804 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2805 mvm_sta->sta_id, tid, baid);
2806 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2807 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2809 baid = mvm_sta->tid_to_baid[tid];
2811 if (mvm->rx_ba_sessions > 0)
2812 /* check that restart flow didn't zero the counter */
2813 mvm->rx_ba_sessions--;
2814 if (!iwl_mvm_has_new_rx_api(mvm))
2815 	return 0;
2817 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2818 	return -EINVAL;
2820 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2821 if (WARN_ON(!baid_data))
2822 	return -EINVAL;
2824 /* synchronize all rx queues so we can safely delete */
2825 iwl_mvm_free_reorder(mvm, baid_data);
2826 del_timer_sync(&baid_data->session_timer);
2827 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2828 kfree_rcu(baid_data, rcu_head);
2829 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2832 * After we've deleted it, do another queue sync
2833 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2834 * running it won't find a new session in the old
2835 * BAID. It can find the NULL pointer for the BAID,
2836 * but we must not have it find a different session.
2838 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2848 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2849 int tid, u8 queue, bool start)
2851 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2852 struct iwl_mvm_add_sta_cmd cmd = {};
2856 lockdep_assert_held(&mvm->mutex);
2858 if (start) {
2859 	mvm_sta->tfd_queue_msk |= BIT(queue);
2860 	mvm_sta->tid_disable_agg &= ~BIT(tid);
2861 } else {
2862 	/* In DQA-mode the queue isn't removed on agg termination */
2863 	mvm_sta->tid_disable_agg |= BIT(tid);
2864 }
2866 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2867 cmd.sta_id = mvm_sta->sta_id;
2868 cmd.add_modify = STA_MODE_MODIFY;
2869 if (!iwl_mvm_has_new_tx_api(mvm))
2870 cmd.modify_mask = STA_MODIFY_QUEUES;
2871 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2872 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2873 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2875 status = ADD_STA_SUCCESS;
2876 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2877 iwl_mvm_add_sta_cmd_size(mvm),
2882 switch (status & IWL_ADD_STA_STATUS_MASK) {
2883 case ADD_STA_SUCCESS:
2884 	break;
2885 default:
2886 	ret = -EIO;
2887 	IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2888 		start ? "start" : "stopp", status);
2889 	break;
2895 const u8 tid_to_mac80211_ac[] = {
2896 	IEEE80211_AC_BE,
2897 	IEEE80211_AC_BK,
2898 	IEEE80211_AC_BK,
2899 	IEEE80211_AC_BE,
2900 	IEEE80211_AC_VI,
2901 	IEEE80211_AC_VI,
2902 	IEEE80211_AC_VO,
2903 	IEEE80211_AC_VO,
2904 	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2905 };
2907 static const u8 tid_to_ucode_ac[] = {
2908 	AC_BE,
2909 	AC_BK,
2910 	AC_BK,
2911 	AC_BE,
2912 	AC_VI,
2913 	AC_VI,
2914 	AC_VO,
2915 	AC_VO,
2916 };
2918 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2919 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2921 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2922 struct iwl_mvm_tid_data *tid_data;
2927 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2928 	return -EINVAL;
2930 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2931 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2933 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2934 mvmsta->tid_data[tid].state);
2938 lockdep_assert_held(&mvm->mutex);
2940 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2941 iwl_mvm_has_new_tx_api(mvm)) {
2942 u8 ac = tid_to_mac80211_ac[tid];
2944 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2949 spin_lock_bh(&mvmsta->lock);
2952 * Note the possible cases:
2953 * 1. An enabled TXQ - TXQ needs to become agg'ed
2954 * 2. The TXQ hasn't yet been enabled, so find a free one and mark
2955 *    it as reserved
2957 txq_id = mvmsta->tid_data[tid].txq_id;
2958 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2959 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2960 IWL_MVM_DQA_MIN_DATA_QUEUE,
2961 IWL_MVM_DQA_MAX_DATA_QUEUE);
2963 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2969 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2970 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2971 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2973 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2974 tid, IWL_MAX_HW_QUEUES - 1);
2977 } else if (unlikely(mvm->queue_info[txq_id].status ==
2978 IWL_MVM_QUEUE_SHARED)) {
2980 IWL_DEBUG_TX_QUEUES(mvm,
2981 "Can't start tid %d agg on shared queue!\n",
2986 IWL_DEBUG_TX_QUEUES(mvm,
2987 "AGG for tid %d will be on queue #%d\n",
2990 tid_data = &mvmsta->tid_data[tid];
2991 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2992 tid_data->txq_id = txq_id;
2993 *ssn = tid_data->ssn;
2995 IWL_DEBUG_TX_QUEUES(mvm,
2996 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2997 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2998 tid_data->next_reclaimed);
3001 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3002 * to align the wrap around of ssn so we compare relevant values.
3004 normalized_ssn = tid_data->ssn;
3005 if (mvm->trans->trans_cfg->gen2)
3006 normalized_ssn &= 0xff;
3008 if (normalized_ssn == tid_data->next_reclaimed) {
3009 tid_data->state = IWL_AGG_STARTING;
3010 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3012 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3013 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3017 spin_unlock_bh(&mvmsta->lock);
3022 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3023 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3026 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3027 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3028 unsigned int wdg_timeout =
3029 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3031 bool alloc_queue = true;
3032 enum iwl_mvm_queue_status queue_status;
3035 struct iwl_trans_txq_scd_cfg cfg = {
3036 .sta_id = mvmsta->sta_id,
3038 .frame_limit = buf_size,
3043 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
3044 * manager, so this function should never be called in this case.
3046 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3049 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3050 != IWL_MAX_TID_COUNT);
3052 spin_lock_bh(&mvmsta->lock);
3053 ssn = tid_data->ssn;
3054 queue = tid_data->txq_id;
3055 tid_data->state = IWL_AGG_ON;
3056 mvmsta->agg_tids |= BIT(tid);
3057 tid_data->ssn = 0xffff;
3058 tid_data->amsdu_in_ampdu_allowed = amsdu;
3059 spin_unlock_bh(&mvmsta->lock);
3061 if (iwl_mvm_has_new_tx_api(mvm)) {
3063 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3064 * would have failed, so if we are here there is no need to
3065 * allocate a queue.
3066 * However, if the aggregation size is different from the default
3067 * size, the scheduler should be reconfigured.
3068 * We cannot do this with the new TX API, so return unsupported
3069 * for now, until it is offloaded to the firmware.
3070 * Note that if the SCD default value changes - this condition
3071 * should be updated as well.
3073 if (buf_size < IWL_FRAME_LIMIT)
3074 	return -ENOTSUPP;
3076 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3082 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3084 queue_status = mvm->queue_info[queue].status;
3086 /* Maybe there is no need to even alloc a queue... */
3087 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3088 alloc_queue = false;
3091 * Only reconfig the SCD for the queue if the window size has
3092 * changed from current (become smaller)
3094 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3096 * If reconfiguring an existing queue, it first must be drained.
3099 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3103 "Error draining queue before reconfig\n");
3107 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3108 mvmsta->sta_id, tid,
3112 "Error reconfiguring TXQ #%d\n", queue);
3118 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3121 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3122 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3123 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3128 /* No need to mark as reserved */
3129 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3133 * Even though in theory the peer could have different
3134 * aggregation reorder buffer sizes for different sessions,
3135 * our ucode doesn't allow for that and has a global limit
3136 * for each station. Therefore, use the minimum of all the
3137 * aggregation sessions and our default value.
3139 mvmsta->max_agg_bufsize =
3140 min(mvmsta->max_agg_bufsize, buf_size);
3141 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3143 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3146 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3149 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3150 struct iwl_mvm_sta *mvmsta,
3151 struct iwl_mvm_tid_data *tid_data)
3153 u16 txq_id = tid_data->txq_id;
3155 lockdep_assert_held(&mvm->mutex);
3157 if (iwl_mvm_has_new_tx_api(mvm))
3161 * The TXQ is marked as reserved only if no traffic came through yet.
3162 * This means no traffic has been sent on this TID (agg'd or not), so
3163 * we no longer have use for the queue. Since it hasn't even been
3164 * allocated through iwl_mvm_enable_txq, we can just mark it back as free.
3167 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3168 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3169 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3173 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3174 struct ieee80211_sta *sta, u16 tid)
3176 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3177 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3182 * If mac80211 is cleaning its state, then say that we finished since
3183 * our state has been cleared anyway.
3185 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3186 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3190 spin_lock_bh(&mvmsta->lock);
3192 txq_id = tid_data->txq_id;
3194 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3195 mvmsta->sta_id, tid, txq_id, tid_data->state);
3197 mvmsta->agg_tids &= ~BIT(tid);
3199 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3201 switch (tid_data->state) {
3203 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3205 IWL_DEBUG_TX_QUEUES(mvm,
3206 "ssn = %d, next_recl = %d\n",
3207 tid_data->ssn, tid_data->next_reclaimed);
3209 tid_data->ssn = 0xffff;
3210 tid_data->state = IWL_AGG_OFF;
3211 spin_unlock_bh(&mvmsta->lock);
3213 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3215 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3217 case IWL_AGG_STARTING:
3218 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3220 * The agg session has been stopped before it was set up. This
3221 * can happen when the AddBA timer times out for example.
3224 /* No barriers since we are under mutex */
3225 lockdep_assert_held(&mvm->mutex);
3227 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3228 tid_data->state = IWL_AGG_OFF;
3233 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3234 mvmsta->sta_id, tid, tid_data->state);
3236 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3240 spin_unlock_bh(&mvmsta->lock);
3245 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3246 struct ieee80211_sta *sta, u16 tid)
3248 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3249 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3251 enum iwl_mvm_agg_state old_state;
3254 * First set the agg state to OFF to avoid calling
3255 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3257 spin_lock_bh(&mvmsta->lock);
3258 txq_id = tid_data->txq_id;
3259 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3260 mvmsta->sta_id, tid, txq_id, tid_data->state);
3261 old_state = tid_data->state;
3262 tid_data->state = IWL_AGG_OFF;
3263 mvmsta->agg_tids &= ~BIT(tid);
3264 spin_unlock_bh(&mvmsta->lock);
3266 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3268 if (old_state >= IWL_AGG_ON) {
3269 iwl_mvm_drain_sta(mvm, mvmsta, true);
3271 if (iwl_mvm_has_new_tx_api(mvm)) {
3272 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3274 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3275 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3277 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3278 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3279 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3282 iwl_mvm_drain_sta(mvm, mvmsta, false);
3284 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3290 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3292 int i, max = -1, max_offs = -1;
3294 lockdep_assert_held(&mvm->mutex);
3296 /* Pick the unused key offset with the highest 'deleted'
3297 * counter. Every time a key is deleted, all the counters
3298 * are incremented and the one that was just deleted is
3299 * reset to zero. Thus, the highest counter is the one
3300 * that was deleted longest ago. Pick that one.
3302 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3303 if (test_bit(i, mvm->fw_key_table))
3305 if (mvm->fw_key_deleted[i] > max) {
3306 max = mvm->fw_key_deleted[i];
3312 return STA_KEY_IDX_INVALID;
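/*
 * A worked example of the reuse policy above (hypothetical counters):
 * with fw_key_deleted = { 7, 5, 3 }, offset 1 still in use and offsets
 * 0 and 2 free, offset 0 is picked - its counter of 7 means it was
 * deleted longest ago, so its slot has been stale the longest.
 */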
3317 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3318 struct ieee80211_vif *vif,
3319 struct ieee80211_sta *sta)
3321 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3324 return iwl_mvm_sta_from_mac80211(sta);
3327 * The device expects GTKs for station interfaces to be
3328 * installed as GTKs for the AP station. If we have no
3329 * station ID, then use AP's station ID.
3331 if (vif->type == NL80211_IFTYPE_STATION &&
3332 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3333 u8 sta_id = mvmvif->ap_sta_id;
3335 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3336 lockdep_is_held(&mvm->mutex));
3339 * It is possible that the 'sta' parameter is NULL,
3340 * for example when a GTK is removed - the sta_id will then
3341 * be the AP ID, and no station was passed by mac80211.
3343 if (IS_ERR_OR_NULL(sta))
3346 return iwl_mvm_sta_from_mac80211(sta);
3352 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3354 	int i;
3356 	for (i = len - 1; i >= 0; i--) {
3357 		if (pn1[i] > pn2[i])
3358 			return 1;
3359 		if (pn1[i] < pn2[i])
3360 			return -1;
3361 	}
3363 	return 0;
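/*
 * Usage sketch with hypothetical values - bytes are compared from the
 * highest index down, so pn[len - 1] acts as the most significant byte:
 */
#if 0
	static const u8 old_pn[6] = { 0x05, 0x00, 0x00, 0x00, 0x00, 0x00 };
	static const u8 new_pn[6] = { 0x00, 0x01, 0x00, 0x00, 0x00, 0x00 };

	/* indices 5..2 are equal; at index 1, 0x01 > 0x00, so this is 1 */
	int newer = iwl_mvm_pn_cmp(new_pn, old_pn, 6);
#endif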
3366 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3368 struct ieee80211_key_conf *key, bool mcast,
3369 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3370 u8 key_offset, bool mfp)
3372 union {
3373 	struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3374 	struct iwl_mvm_add_sta_key_cmd cmd;
3375 } u = {};
3382 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3383 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3384 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3387 if (sta_id == IWL_MVM_INVALID_STA)
3388 	return -EINVAL;
3390 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3391 STA_KEY_FLG_KEYID_MSK;
3392 key_flags = cpu_to_le16(keyidx);
3393 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3395 switch (key->cipher) {
3396 case WLAN_CIPHER_SUITE_TKIP:
3397 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3399 memcpy((void *)&u.cmd.tx_mic_key,
3400 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3403 memcpy((void *)&u.cmd.rx_mic_key,
3404 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3406 pn = atomic64_read(&key->tx_pn);
3409 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3410 for (i = 0; i < 5; i++)
3411 u.cmd_v1.tkip_rx_ttak[i] =
3412 cpu_to_le16(tkip_p1k[i]);
3414 memcpy(u.cmd.common.key, key->key, key->keylen);
3416 case WLAN_CIPHER_SUITE_CCMP:
3417 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3418 memcpy(u.cmd.common.key, key->key, key->keylen);
3420 pn = atomic64_read(&key->tx_pn);
3422 case WLAN_CIPHER_SUITE_WEP104:
3423 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3425 case WLAN_CIPHER_SUITE_WEP40:
3426 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3427 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3429 case WLAN_CIPHER_SUITE_GCMP_256:
3430 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3432 case WLAN_CIPHER_SUITE_GCMP:
3433 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3434 memcpy(u.cmd.common.key, key->key, key->keylen);
3436 pn = atomic64_read(&key->tx_pn);
3439 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3440 memcpy(u.cmd.common.key, key->key, key->keylen);
3444 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3446 key_flags |= cpu_to_le16(STA_KEY_MFP);
3448 u.cmd.common.key_offset = key_offset;
3449 u.cmd.common.key_flags = key_flags;
3450 u.cmd.common.sta_id = sta_id;
3452 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3457 for (; i < IEEE80211_NUM_TIDS; i++) {
3458 struct ieee80211_key_seq seq = {};
3459 u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3461 /* there's a hole at 2/3 in FW format depending on version */
3462 int hole = api_ver >= 3 ? 0 : 2;
3464 ieee80211_get_key_rx_seq(key, i, &seq);
3466 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3467 rx_pn[0] = seq.tkip.iv16;
3468 rx_pn[1] = seq.tkip.iv16 >> 8;
3469 rx_pn[2 + hole] = seq.tkip.iv32;
3470 rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3471 rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3472 rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3473 } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3475 rx_pn_len = seq.hw.seq_len;
3477 rx_pn[0] = seq.ccmp.pn[0];
3478 rx_pn[1] = seq.ccmp.pn[1];
3479 rx_pn[2 + hole] = seq.ccmp.pn[2];
3480 rx_pn[3 + hole] = seq.ccmp.pn[3];
3481 rx_pn[4 + hole] = seq.ccmp.pn[4];
3482 rx_pn[5 + hole] = seq.ccmp.pn[5];
3485 if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3486 		   rx_pn_len) > 0)
3487 	memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3488 	       rx_pn_len);
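/*
 * The "hole" above, illustrated (layout inferred from this loop): for
 * ADD_STA_KEY API versions < 3 the FW format skips bytes 2-3, so a CCMP
 * PN whose big-endian bytes are pn[0]..pn[5] lands as
 *
 *   byte:    0      1      2      3      4      5      6      7
 *   v<3:   pn[0]  pn[1]   --     --   pn[2]  pn[3]  pn[4]  pn[5]
 *   v>=3:  pn[0]  pn[1]  pn[2]  pn[3]  pn[4]  pn[5]
 */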
3492 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3493 size = sizeof(u.cmd);
3495 size = sizeof(u.cmd_v1);
3498 status = ADD_STA_SUCCESS;
3499 if (cmd_flags & CMD_ASYNC)
3500 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3503 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3507 case ADD_STA_SUCCESS:
3508 	IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3509 	break;
3510 default:
3511 	ret = -EIO;
3512 	IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3513 	break;
3519 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3520 struct ieee80211_key_conf *keyconf,
3521 u8 sta_id, bool remove_key)
3523 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3525 /* verify the key details match the required command's expectations */
3526 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3527 (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3528 keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3529 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3530 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3531 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3534 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3535 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3538 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3539 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3542 /* This is a valid situation for IGTK */
3543 if (sta_id == IWL_MVM_INVALID_STA)
3544 	return 0;
3546 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3548 struct ieee80211_key_seq seq;
3551 switch (keyconf->cipher) {
3552 case WLAN_CIPHER_SUITE_AES_CMAC:
3553 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3555 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3556 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3557 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3563 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3564 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3565 igtk_cmd.ctrl_flags |=
3566 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3567 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3568 pn = seq.aes_cmac.pn;
3569 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3570 ((u64) pn[4] << 8) |
3571 ((u64) pn[3] << 16) |
3572 ((u64) pn[2] << 24) |
3573 ((u64) pn[1] << 32) |
3574 ((u64) pn[0] << 40));
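/*
 * A worked example of the packing above (hypothetical PN):
 * seq.aes_cmac.pn is big-endian with pn[0] the most significant byte, so
 * pn = {0x00, 0x00, 0x00, 0x01, 0x23, 0x45} packs into the u64
 * 0x0000000000012345 before the cpu_to_le64() conversion.
 */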
3577 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3578 remove_key ? "removing" : "installing",
3579 keyconf->keyidx >= 6 ? "B" : "",
3580 keyconf->keyidx, igtk_cmd.sta_id);
3582 if (!iwl_mvm_has_new_rx_api(mvm)) {
3583 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3584 .ctrl_flags = igtk_cmd.ctrl_flags,
3585 .key_id = igtk_cmd.key_id,
3586 .sta_id = igtk_cmd.sta_id,
3587 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3590 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3591 ARRAY_SIZE(igtk_cmd_v1.igtk));
3592 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3593 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3595 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3596 sizeof(igtk_cmd), &igtk_cmd);
3600 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3601 struct ieee80211_vif *vif,
3602 struct ieee80211_sta *sta)
3604 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3609 if (vif->type == NL80211_IFTYPE_STATION &&
3610 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3611 u8 sta_id = mvmvif->ap_sta_id;
3612 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3613 lockdep_is_held(&mvm->mutex));
3621 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3622 struct ieee80211_vif *vif,
3623 struct ieee80211_sta *sta,
3624 struct ieee80211_key_conf *keyconf,
3629 struct ieee80211_key_seq seq;
3635 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3637 sta_id = mvm_sta->sta_id;
3639 } else if (vif->type == NL80211_IFTYPE_AP &&
3640 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3641 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3643 sta_id = mvmvif->mcast_sta.sta_id;
3645 IWL_ERR(mvm, "Failed to find station id\n");
3646 return -EINVAL;
3649 if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3650 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3651 /* get phase 1 key from mac80211 */
3652 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3653 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3655 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3656 seq.tkip.iv32, p1k, 0, key_offset,
3660 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3661 0, NULL, 0, key_offset, mfp);
3664 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3665 struct ieee80211_vif *vif,
3666 struct ieee80211_sta *sta,
3667 struct ieee80211_key_conf *keyconf,
3670 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3671 struct iwl_mvm_sta *mvm_sta;
3672 u8 sta_id = IWL_MVM_INVALID_STA;
3674 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3676 lockdep_assert_held(&mvm->mutex);
3678 if (vif->type != NL80211_IFTYPE_AP ||
3679 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3680 /* Get the station id from the mvm local station table */
3681 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3683 IWL_ERR(mvm, "Failed to find station\n");
3684 return -EINVAL;
3686 sta_id = mvm_sta->sta_id;
3689 * It is possible that the 'sta' parameter is NULL, and thus
3690 * there is a need to retrieve the sta from the local station
3694 sta = rcu_dereference_protected(
3695 mvm->fw_id_to_mac_id[sta_id],
3696 lockdep_is_held(&mvm->mutex));
3697 if (IS_ERR_OR_NULL(sta)) {
3698 IWL_ERR(mvm, "Invalid station id\n");
3699 return -EINVAL;
3703 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3706 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3708 sta_id = mvmvif->mcast_sta.sta_id;
3711 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3712 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3713 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3714 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3718 /* If the key_offset is not pre-assigned, we need to find a
3719 * new offset to use. In normal cases, the offset is not
3720 * pre-assigned, but during HW_RESTART we want to reuse the
3721 * same indices, so we pass them when this function is called.
3723 * In D3 entry, we need to hardcode the indices (because the
3724 * firmware hardcodes the PTK offset to 0). In this case, we
3725 * need to make sure we don't overwrite the hw_key_idx in the
3726 * keyconf structure, because otherwise we cannot configure
3727 * the original ones back when resuming.
3729 if (key_offset == STA_KEY_IDX_INVALID) {
3730 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3731 if (key_offset == STA_KEY_IDX_INVALID)
3733 keyconf->hw_key_idx = key_offset;
3736 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3741 * For WEP, the same key is used for multicast and unicast. Upload it
3742 * again, using the same key offset, and now pointing the other one
3743 * to the same key slot (offset).
3744 * If this fails, remove the original as well.
3746 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3747 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3749 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3750 key_offset, !mcast);
3752 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3757 __set_bit(key_offset, mvm->fw_key_table);
3760 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3761 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3762 sta ? sta->addr : zero_addr, ret);
3766 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3767 struct ieee80211_vif *vif,
3768 struct ieee80211_sta *sta,
3769 struct ieee80211_key_conf *keyconf)
3771 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3772 struct iwl_mvm_sta *mvm_sta;
3773 u8 sta_id = IWL_MVM_INVALID_STA;
3776 lockdep_assert_held(&mvm->mutex);
3778 /* Get the station from the mvm local station table */
3779 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3781 sta_id = mvm_sta->sta_id;
3782 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3783 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3786 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3787 keyconf->keyidx, sta_id);
3789 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3790 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3791 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3792 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3794 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3795 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3796 	keyconf->hw_key_idx);
3797 return -ENOENT;
3800 /* track which key was deleted last */
3801 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3802 if (mvm->fw_key_deleted[i] < U8_MAX)
3803 mvm->fw_key_deleted[i]++;
3805 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3807 if (sta && !mvm_sta) {
3808 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3809 return 0;
3812 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3816 /* delete WEP key twice to get rid of (now useless) offset */
3817 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3818 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3819 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3824 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3825 struct ieee80211_vif *vif,
3826 struct ieee80211_key_conf *keyconf,
3827 struct ieee80211_sta *sta, u32 iv32,
3830 struct iwl_mvm_sta *mvm_sta;
3831 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3832 bool mfp = sta ? sta->mfp : false;
3836 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3837 if (WARN_ON_ONCE(!mvm_sta))
3839 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3840 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3847 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3848 struct ieee80211_sta *sta)
3850 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3851 struct iwl_mvm_add_sta_cmd cmd = {
3852 .add_modify = STA_MODE_MODIFY,
3853 .sta_id = mvmsta->sta_id,
3854 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3855 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3859 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3860 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3862 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3865 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3866 struct ieee80211_sta *sta,
3867 enum ieee80211_frame_release_type reason,
3868 u16 cnt, u16 tids, bool more_data,
3869 bool single_sta_queue)
3871 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3872 struct iwl_mvm_add_sta_cmd cmd = {
3873 .add_modify = STA_MODE_MODIFY,
3874 .sta_id = mvmsta->sta_id,
3875 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3876 .sleep_tx_count = cpu_to_le16(cnt),
3877 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3880 unsigned long _tids = tids;
3882 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3883 * Note that this field is reserved and unused by firmware not
3884 * supporting GO uAPSD, so it's safe to always do this.
3886 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3887 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
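/*
 * For example (illustrative bitmap): tids == 0x0041 selects TIDs 0 and 6,
 * so awake_acs ends up with BIT(AC_BE) and BIT(AC_VO) set, per
 * tid_to_ucode_ac[] above.
 */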
3889 /* If we're releasing frames from aggregation or dqa queues then check
3890 * if all the queues that we're releasing frames from, combined, have:
3891 * - more frames than the service period, in which case more_data
3892 *   needs to be set
3893 * - fewer than 'cnt' frames, in which case we need to adjust the
3894 * firmware command (but do that unconditionally)
3896 if (single_sta_queue) {
3897 int remaining = cnt;
3900 spin_lock_bh(&mvmsta->lock);
3901 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3902 struct iwl_mvm_tid_data *tid_data;
3905 tid_data = &mvmsta->tid_data[tid];
3907 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3908 if (n_queued > remaining) {
3913 remaining -= n_queued;
3915 sleep_tx_count = cnt - remaining;
3916 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3917 mvmsta->sleep_tx_count = sleep_tx_count;
3918 spin_unlock_bh(&mvmsta->lock);
3920 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3921 if (WARN_ON(cnt - remaining == 0)) {
3922 ieee80211_sta_eosp(sta);
3927 /* Note: this is ignored by firmware not supporting GO uAPSD */
3929 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3931 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3932 mvmsta->next_status_eosp = true;
3933 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3935 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3938 /* block the Tx queues until the FW updated the sleep Tx count */
3939 iwl_trans_block_txq_ptrs(mvm->trans, true);
3941 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3942 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3943 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3945 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3948 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3949 struct iwl_rx_cmd_buffer *rxb)
3951 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3952 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3953 struct ieee80211_sta *sta;
3954 u32 sta_id = le32_to_cpu(notif->sta_id);
3956 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3957 	return;
3959 rcu_read_lock();
3960 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3961 if (!IS_ERR_OR_NULL(sta))
3962 	ieee80211_sta_eosp(sta);
3963 rcu_read_unlock();
3966 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3967 struct iwl_mvm_sta *mvmsta, bool disable)
3969 struct iwl_mvm_add_sta_cmd cmd = {
3970 .add_modify = STA_MODE_MODIFY,
3971 .sta_id = mvmsta->sta_id,
3972 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3973 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3974 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3978 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3979 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3981 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3984 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3985 struct ieee80211_sta *sta,
3988 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3990 spin_lock_bh(&mvm_sta->lock);
3992 if (mvm_sta->disable_tx == disable) {
3993 spin_unlock_bh(&mvm_sta->lock);
3997 mvm_sta->disable_tx = disable;
4000 * If sta PS state is handled by mac80211, tell it to start/stop
4001 * queuing tx for this station.
4003 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4004 ieee80211_sta_block_awake(mvm->hw, sta, disable);
4006 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4008 spin_unlock_bh(&mvm_sta->lock);
4011 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4012 struct iwl_mvm_vif *mvmvif,
4013 struct iwl_mvm_int_sta *sta,
4016 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4017 struct iwl_mvm_add_sta_cmd cmd = {
4018 .add_modify = STA_MODE_MODIFY,
4019 .sta_id = sta->sta_id,
4020 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4021 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4022 .mac_id_n_color = cpu_to_le32(id),
4026 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4027 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4029 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4032 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4033 struct iwl_mvm_vif *mvmvif,
4036 struct ieee80211_sta *sta;
4037 struct iwl_mvm_sta *mvm_sta;
4042 /* Block/unblock all the stations of the given mvmvif */
4043 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4044 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4045 if (IS_ERR_OR_NULL(sta))
4048 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4049 if (mvm_sta->mac_id_n_color !=
4050 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4053 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4058 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4061 /* Need to block/unblock also multicast station */
4062 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4063 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4064 &mvmvif->mcast_sta, disable);
4067 * Only unblock the broadcast station (FW blocks it for immediate
4068 * quiet, not the driver)
4070 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4071 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4072 &mvmvif->bcast_sta, disable);
4075 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4077 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4078 struct iwl_mvm_sta *mvmsta;
4082 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4085 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4090 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4092 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4095 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4096 * to align the wrap around of ssn so we compare relevant values.
4098 if (mvm->trans->trans_cfg->gen2)
4099 	sn &= 0xff;
4101 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
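/*
 * A worked example with illustrative values: on gen2 HW next_reclaimed
 * is an 8-bit index, so with next_reclaimed == 0x10 and a full 12-bit
 * SN of 0x115, masking gives sn == 0x15 and ieee80211_sn_sub(0x15, 0x10)
 * correctly reports 5 queued frames; without the masking the subtraction
 * would report 0x105.
 */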
4104 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4105 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4106 u8 *key, u32 key_len)
4110 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4111 struct ieee80211_key_conf *keyconf;
4113 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4114 NL80211_IFTYPE_UNSPECIFIED,
4119 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4121 IWL_MVM_TX_FIFO_BE);
4125 keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4131 keyconf->cipher = cipher;
4132 memcpy(keyconf->key, key, key_len);
4133 keyconf->keylen = key_len;
4135 ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4136 0, NULL, 0, 0, true);
4140 iwl_mvm_dealloc_int_sta(mvm, sta);
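/*
 * A hypothetical usage sketch for the PASN helper above (values are
 * illustrative; CCMP uses a 16-byte temporal key):
 */
#if 0
	struct iwl_mvm_int_sta pasn_sta = {};
	u8 peer[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 tk[16] = {}; /* temporal key derived by the PASN handshake */

	ret = iwl_mvm_add_pasn_sta(mvm, vif, &pasn_sta, peer,
				   WLAN_CIPHER_SUITE_CCMP, tk, sizeof(tk));
#endif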
4144 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4145 struct ieee80211_vif *vif,
4148 struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4149 .mac_id = cpu_to_le32(mac_id),
4153 ret = iwl_mvm_send_cmd_pdu(mvm,
4154 WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4156 sizeof(cancel_channel_switch_cmd),
4157 &cancel_channel_switch_cmd);
4159 IWL_ERR(mvm, "Failed to cancel the channel switch\n");