/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * New version of ADD_STA_sta command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}
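
/*
 * Illustration of the size-based versioning above: for firmware that
 * predates the STA_TYPE API the command payload is truncated to the v7
 * length, so such firmware never sees the newer trailing fields, while
 * newer firmware receives the full struct iwl_mvm_add_sta_cmd.
 */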

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
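
/*
 * Typical use of the helper above, as in iwl_mvm_add_sta() below
 * (illustrative):
 *
 *	sta_id = iwl_mvm_find_free_sta_id(mvm, ieee80211_vif_type_p2p(vif));
 *	if (sta_id == IWL_MVM_INVALID_STA)
 *		return -ENOSPC;
 */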

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}
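
/*
 * Timer callback for an RX BA session: re-arm the timer if frames were
 * received within the last two timeout intervals, otherwise notify
 * mac80211 that the session expired so it tears down the aggregation.
 */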
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
	int ret;

	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
		return -EINVAL;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);

		if (remove_mac_queue)
			mvm->hw_queue_to_mac80211[queue] &=
				~BIT(mac80211_queue);

		spin_unlock_bh(&mvm->queue_info_lock);

		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	/*
	 * If there is another TID with the same AC - don't remove the MAC queue
	 * from the mapping
	 */
	if (tid < IWL_MAX_TID_COUNT) {
		unsigned long tid_bitmap =
			mvm->queue_info[queue].tid_bitmap;
		int ac = tid_to_mac80211_ac[tid];
		int i;

		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
			if (tid_to_mac80211_ac[i] == ac)
				remove_mac_queue = false;
		}
	}

	if (remove_mac_queue)
		mvm->hw_queue_to_mac80211[queue] &=
			~BIT(mac80211_queue);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
		spin_unlock_bh(&mvm->queue_info_lock);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap ||
	     mvm->hw_queue_to_mac80211[queue],
	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
	     queue, mvm->hw_queue_to_mac80211[queue],
	     mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;
	mvm->hw_queue_to_mac80211[queue] = 0;

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	spin_unlock_bh(&mvm->queue_info_lock);

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}
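
/*
 * Return the bitmap of TIDs on @queue that currently have an aggregation
 * session open (IWL_AGG_ON), so callers can disable those aggregations
 * before repurposing the queue.
 */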
static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 txq_curr_ac, sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	spin_lock_bh(&mvm->queue_info_lock);
	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, queue,
				  mvmsta->vif->hw_queue[txq_curr_ac],
				  tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
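
/*
 * Pick an existing DATA queue of this STA to share for a new stream.
 * Example of the selection policy implemented below (illustrative): if the
 * STA only owns queues on AC_VI and AC_BK, a new AC_VO stream shares the
 * AC_VI queue (rule 3a), while a new AC_BE stream shares the AC_BK queue
 * (rule 3b).
 */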
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
				      int ac, int ssn, unsigned int wdg_timeout,
				      bool force)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	unsigned long mq;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	spin_lock_bh(&mvm->queue_info_lock);
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		spin_unlock_bh(&mvm->queue_info_lock);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	mq = mvm->hw_queue_to_mac80211[queue];
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop MAC queues and wait for this queue to empty */
	iwl_mvm_stop_mac_queues(mvm, mq);
	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].mac80211_ac = ac;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no shared-queue indication
	 * there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the MAC queues */
	iwl_mvm_start_mac_queues(mvm, mq);

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->queue_info_lock);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = IWL_MGMT_QUEUE_SIZE;
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
			    queue, mvm->hw_queue_to_mac80211[queue]);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
					wdg_timeout);
	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}
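
/*
 * Track @tid/@sta_id on @queue in the driver bookkeeping. Returns %true
 * if this is the first TID on the queue, i.e. the caller still needs to
 * actually enable the hardware queue; %false if the queue was already
 * running and merely gained another TID (queue sharing).
 */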
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
				       int mac80211_queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		spin_unlock_bh(&mvm->queue_info_lock);
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
		WARN(mac80211_queue >=
		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
		     mac80211_queue, queue, sta_id, tid);
		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
	}

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
			    queue, mvm->queue_info[queue].tid_bitmap,
			    mvm->hw_queue_to_mac80211[queue]);

	spin_unlock_bh(&mvm->queue_info_lock);

	return enable_queue;
}
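
/*
 * Note for callers: a %true return value means the transport advanced the
 * queue's write pointer past @ssn, so the caller must bump its own
 * sequence number by one MPDU (0x10 in seq_ctrl) to stay in sync - see
 * iwl_mvm_sta_alloc_queue().
 */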
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
			       int mac80211_queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
					cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].txq_tid = tid;
	spin_unlock_bh(&mvm->queue_info_lock);
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvm->queue_info_lock);
	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
					 tid_to_mac80211_ac[tid], ssn,
					 wdg_timeout, true);
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->queue_info_lock);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
		u16 tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		mvm->hw_queue_to_mac80211[queue] |=
			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
	}

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	spin_lock_bh(&mvm->queue_info_lock);

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		/* this isn't so nice, but works OK due to the way we loop */
		spin_unlock(&mvm->queue_info_lock);

		/* and we need this locking order */
		spin_lock(&mvmsta->lock);
		spin_lock(&mvm->queue_info_lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0)
			free_queue = i;
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock(&mvmsta->lock);
	}

	rcu_read_unlock();
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid,
				   struct ieee80211_hdr *hdr)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	u8 mac_queue = mvmsta->vif->hw_queue[ac];
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
	 * exists
	 */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);

		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);

		spin_lock_bh(&mvm->queue_info_lock);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	spin_unlock_bh(&mvm->queue_info_lock);

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
				     ssn, &cfg, wdg_timeout);
	if (inc_ssn) {
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
		le16_add_cpu(&hdr->seq_ctrl, 0x10);
	}

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no shared-queue indication
	 * there.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn)
		mvmsta->tid_data[tid].seq_number += 0x10;
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
						 wdg_timeout, false);
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}
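
/*
 * Transmit the frames that were deferred for @sta/@tid while no TXQ was
 * allocated: allocate a queue if needed, splice the deferred skbs off the
 * STA under its lock, then send them - or free them if no queue could be
 * allocated.
 */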
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head deferred_tx;
	u8 mac_queue;
	bool no_queue = false; /* Marks if there is a problem with the queue */
	u8 ac;

	lockdep_assert_held(&mvm->mutex);

	skb = skb_peek(&tid_data->deferred_tx_frames);
	if (!skb)
		return;
	hdr = (void *)skb->data;

	ac = iwl_mvm_tid_to_ac_queue(tid);
	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;

	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
		IWL_ERR(mvm,
			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
			mvmsta->sta_id, tid);

		/*
		 * Mark queue as problematic so later the deferred traffic is
		 * freed, as we can do nothing with it
		 */
		no_queue = true;
	}

	__skb_queue_head_init(&deferred_tx);

	/* Disable bottom-halves when entering TX path */
	local_bh_disable();
	spin_lock(&mvmsta->lock);
	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	spin_unlock(&mvmsta->lock);

	while ((skb = __skb_dequeue(&deferred_tx)))
		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
			ieee80211_free_txskb(mvm->hw, skb);
	local_bh_enable();

	/* Wake queue */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long deferred_tid_traffic;
	int sta_id, tid;

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Go over all stations with deferred traffic */
	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
			 IWL_MVM_STATION_COUNT) {
		clear_bit(sta_id, mvm->sta_deferred_frames);
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;

		for_each_set_bit(tid, &deferred_tid_traffic,
				 IWL_MAX_TID_COUNT + 1)
			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	spin_lock_bh(&mvm->queue_info_lock);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		spin_unlock_bh(&mvm->queue_info_lock);
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
		spin_lock_bh(&mvm->queue_info_lock);
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	spin_unlock_bh(&mvm->queue_info_lock);

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct iwl_mvm_sta *mvm_sta)
{
	unsigned int wdg_timeout =
			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;
		u8 mac_queue;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		skb_queue_head_init(&tid_data->deferred_tx_frames);

		ac = tid_to_mac80211_ac[i];
		mac_queue = mvm_sta->vif->hw_queue[ac];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
							 mvm_sta->sta_id,
							 i, wdg_timeout);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
						IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
					   wdg_timeout);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
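
/*
 * Send ADD_STA for an internal (driver-managed) station such as the aux,
 * sniffer or broadcast station. @addr may be NULL, in which case the MAC
 * address field of the command is left zeroed.
 */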
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
	}
	mvm_sta->deferred_traffic_tid_map = 0;
	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct iwl_mvm_sta *mvm_sta)
{
	int ac;
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = iwl_mvm_tid_to_ac_queue(i);
		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
				    vif->hw_queue[ac], i, 0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	u8 sta_id = mvm_sta->sta_id;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_rx_api(mvm))
		kfree(mvm_sta->dup_data);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		spin_lock_bh(&mvm->queue_info_lock);
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			spin_unlock_bh(&mvm->queue_info_lock);
			return -EINVAL;
		}

		*status = IWL_MVM_QUEUE_FREE;
		spin_unlock_bh(&mvm->queue_info_lock);
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->bss_conf.assoc)
			return ret;

		/* unassoc - go ahead - remove the AP STA now */
		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

		/* clear d0i3_ap_sta_id if no longer relevant */
		if (mvm->d0i3_ap_sta_id == sta_id)
			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	/*
	 * Make sure that the tx response code sees the station as -EBUSY and
	 * calls the drain worker.
	 */
	spin_lock_bh(&mvm_sta->lock);
	spin_unlock_bh(&mvm_sta->lock);

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     enum iwl_sta_type type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}
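
/*
 * Map the aux/sniffer queue to its FIFO. On the new TX API (TVQM) path the
 * queue number is allocated by the transport and written back through
 * @queue; on the legacy path the fixed queue number is configured in place.
 */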
static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
					mvm->cfg->base_params->wd_timeout :
					IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
	}
}
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
			    IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
				   &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
						mvmvif->bcast_sta.sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC) {
		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
		mvmvif->cab_queue = vif->cab_queue;
	}

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);
		msta->tfd_queue_msk |= BIT(vif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
						    msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
				   &cfg, timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
			    0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
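		/*
		 * The buffer starts out invalid; the RX path marks it
		 * valid once it has synchronized head_sn against the
		 * first frame received for this BAID (see the RX
		 * reorder code).
		 */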
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
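		/*
		 * E.g. (hypothetical sizes) with 64-byte cache lines and
		 * 16-byte entries, a 100-entry window is 1600 bytes, which
		 * ALIGN() leaves at 1600 (already a multiple of 64), i.e.
		 * 100 entries per queue; a 90-entry window (1440 bytes)
		 * rounds up to 1472, i.e. 92 entries per queue.
		 */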
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (start) {
		cmd.add_immediate_ba_tid = (u8) tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
	} else {
		cmd.remove_immediate_ba_tid = (u8) tid;
	}
	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
				  STA_MODIFY_REMOVE_BA_TID;

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		goto out_free;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		break;
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		ret = -ENOSPC;
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	if (ret)
		goto out_free;

	if (start) {
		u8 baid;

		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
			ret = -EINVAL;
			goto out_free;
		}
		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
			    IWL_ADD_STA_BAID_SHIFT);
		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_id = mvm_sta->sta_id;

		mvm_sta->tid_to_baid[tid] = baid;
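		/*
		 * Arm the session timer to twice the BA timeout; the
		 * expiry handler re-arms it as long as last_rx shows that
		 * frames are still arriving, so this only bounds how often
		 * the handler runs, not the session lifetime itself.
		 */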
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		u8 baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		del_timer_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

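/*
 * TID to AC lookup tables: TIDs 0-7 follow the standard 802.11
 * UP-to-AC mapping, and management frames are treated as TID 8,
 * mapped to the voice AC.
 */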
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 IWL_MVM_DQA_MIN_DATA_QUEUE,
						 IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (txq_id < 0) {
			ret = txq_id;
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto release_locks;
	}

	spin_unlock(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;
	goto out;

release_locks:
	spin_unlock(&mvm->queue_info_lock);
out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if aggregation size is different from the default
		 * size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	spin_lock_bh(&mvm->queue_info_lock);
	queue_status = mvm->queue_info[queue].status;
	spin_unlock_bh(&mvm->queue_info_lock);

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back
	 * as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	spin_unlock_bh(&mvm->queue_info_lock);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
						   BIT(tid), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
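	/* For example, if offsets 1 and 3 are free with fw_key_deleted
	 * counters of 2 and 5 respectively, offset 3 was deleted longer
	 * ago and gets reused first.
	 */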
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
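	/*
	 * The v1 and current ADD_STA_KEY command layouts share a common
	 * prefix, so a union lets us fill the common fields once and send
	 * whichever size the firmware API expects.
	 */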
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (new_api) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (new_api)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (new_api) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
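		/*
		 * mac80211 stores the IGTK PN big-endian (pn[0] is the
		 * most significant byte); the firmware expects a
		 * little-endian counter, hence the byte reversal below.
		 */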
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
		       remove_key ? "removing" : "installing",
		       igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->ap_sta_id;
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	int ret;
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   seq.tkip.iv32, p1k, 0, key_offset,
					   mfp);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
		break;
	default:
		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					   0, NULL, 0, key_offset, mfp);
	}

	return ret;
}

static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

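/*
 * Setting station_flags_msk to STA_FLG_PS while leaving station_flags
 * clear tells the firmware to drop the power-save flag, i.e. to treat
 * the station as awake again.
 */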
void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
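	/* E.g. cnt = 4 frames released with two TIDs holding 2 and 1
	 * queued frames: remaining ends up 1, so sleep_tx_count = 3 is
	 * reported to the firmware and more_data stays unchanged.
	 */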
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/* Tell mac80211 to start/stop queuing tx for this station */
	ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->mcast_sta, disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->bcast_sta, disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}