1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
7 #include <net/mac80211.h>
 * New version of the ADD_STA command added new fields at the end of the
15 * structure, so sending the size of the relevant API's structure is enough to
16 * support both API versions.
18 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
20 if (iwl_mvm_has_new_rx_api(mvm) ||
21 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
22 return sizeof(struct iwl_mvm_add_sta_cmd);
24 return sizeof(struct iwl_mvm_add_sta_cmd_v7);
27 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
28 enum nl80211_iftype iftype)
33 BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
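	/* reserved station IDs are tracked as BIT() flags in a 32-bit mask, hence the cap of 32 */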
34 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
36 lockdep_assert_held(&mvm->mutex);
38 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
39 if (iftype != NL80211_IFTYPE_STATION)
40 reserved_ids = BIT(0);
42 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */
43 for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
44 if (BIT(sta_id) & reserved_ids)
47 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
48 lockdep_is_held(&mvm->mutex)))
51 return IWL_MVM_INVALID_STA;
54 /* send station add/update command to firmware */
55 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
56 bool update, unsigned int flags)
58 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
59 struct iwl_mvm_add_sta_cmd add_sta_cmd = {
60 .sta_id = mvm_sta->sta_id,
61 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
62 .add_modify = update ? 1 : 0,
63 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
65 STA_FLG_RTS_MIMO_PROT),
66 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
70 u32 agg_size = 0, mpdu_dens = 0;
72 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
73 add_sta_cmd.station_type = mvm_sta->sta_type;
75 if (!update || (flags & STA_MODIFY_QUEUES)) {
76 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
78 if (!iwl_mvm_has_new_tx_api(mvm)) {
79 add_sta_cmd.tfd_queue_msk =
80 cpu_to_le32(mvm_sta->tfd_queue_msk);
82 if (flags & STA_MODIFY_QUEUES)
83 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
85 WARN_ON(flags & STA_MODIFY_QUEUES);
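	/*
	 * Each case below intentionally falls through, so a station capable of
	 * a wider bandwidth also gets all of the narrower FAT flags set.
	 */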
89 switch (sta->bandwidth) {
90 case IEEE80211_STA_RX_BW_320:
91 case IEEE80211_STA_RX_BW_160:
92 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
94 case IEEE80211_STA_RX_BW_80:
95 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
97 case IEEE80211_STA_RX_BW_40:
98 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
100 case IEEE80211_STA_RX_BW_20:
101 if (sta->ht_cap.ht_supported)
102 add_sta_cmd.station_flags |=
103 cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
107 switch (sta->rx_nss) {
109 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
112 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
115 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
119 switch (sta->smps_mode) {
120 case IEEE80211_SMPS_AUTOMATIC:
121 case IEEE80211_SMPS_NUM_MODES:
124 case IEEE80211_SMPS_STATIC:
126 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
127 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
129 case IEEE80211_SMPS_DYNAMIC:
130 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
132 case IEEE80211_SMPS_OFF:
137 if (sta->ht_cap.ht_supported) {
138 add_sta_cmd.station_flags_msk |=
139 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
140 STA_FLG_AGG_MPDU_DENS_MSK);
142 mpdu_dens = sta->ht_cap.ampdu_density;
145 if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
146 add_sta_cmd.station_flags_msk |=
147 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
148 STA_FLG_AGG_MPDU_DENS_MSK);
150 mpdu_dens = le16_get_bits(sta->he_6ghz_capa.capa,
151 IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
152 agg_size = le16_get_bits(sta->he_6ghz_capa.capa,
153 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
155 if (sta->vht_cap.vht_supported) {
156 agg_size = sta->vht_cap.cap &
157 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
159 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
160 } else if (sta->ht_cap.ht_supported) {
161 agg_size = sta->ht_cap.ampdu_factor;
164 /* D6.0 10.12.2 A-MPDU length limit rules
165 * A STA indicates the maximum length of the A-MPDU preEOF padding
166 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
167 * Exponent field in its HT Capabilities, VHT Capabilities,
168 * and HE 6 GHz Band Capabilities elements (if present) and the
 * Maximum A-MPDU Length Exponent Extension field in its HE
170 * Capabilities element
172 if (sta->he_cap.has_he)
173 agg_size += u8_get_bits(sta->he_cap.he_cap_elem.mac_cap_info[3],
174 IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
176 /* Limit to max A-MPDU supported by FW */
177 if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
178 agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
179 STA_FLG_MAX_AGG_SIZE_SHIFT);
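	/*
	 * agg_size is the exponent above the implicit 2^13-byte base, e.g. a
	 * VHT exponent of 7 plus an HE extension of 2 gives 9, i.e. a maximum
	 * A-MPDU of 2^(13 + 9) - 1 bytes - matching the 4M firmware cap above.
	 */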
181 add_sta_cmd.station_flags |=
182 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
183 add_sta_cmd.station_flags |=
184 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
185 if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
186 add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
189 add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
191 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
192 add_sta_cmd.uapsd_acs |= BIT(AC_BK);
193 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
194 add_sta_cmd.uapsd_acs |= BIT(AC_BE);
195 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
196 add_sta_cmd.uapsd_acs |= BIT(AC_VI);
197 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
198 add_sta_cmd.uapsd_acs |= BIT(AC_VO);
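		/*
		 * The firmware field carries the trigger-enabled ACs in the
		 * low nibble and the delivery-enabled ACs in the high nibble;
		 * both sets are identical here, so mirror the bits up.
		 * sp_length is in frames: max_sp counts pairs of frames, with
		 * 0 meaning "all buffered frames" (encoded as 128).
		 */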
199 add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
200 add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
203 status = ADD_STA_SUCCESS;
204 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
205 iwl_mvm_add_sta_cmd_size(mvm),
206 &add_sta_cmd, &status);
210 switch (status & IWL_ADD_STA_STATUS_MASK) {
211 case ADD_STA_SUCCESS:
212 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
216 IWL_ERR(mvm, "ADD_STA failed\n");
223 static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
225 struct iwl_mvm_baid_data *data =
226 from_timer(data, t, session_timer);
227 struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
228 struct iwl_mvm_baid_data *ba_data;
229 struct ieee80211_sta *sta;
230 struct iwl_mvm_sta *mvm_sta;
231 unsigned long timeout;
235 ba_data = rcu_dereference(*rcu_ptr);
237 if (WARN_ON(!ba_data))
240 if (!ba_data->timeout)
243 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
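	/*
	 * Grace window: keep the session alive for twice the negotiated BA
	 * timeout (in TUs) after the last received frame; if frames arrived
	 * within that window, just re-arm the timer instead of expiring.
	 */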
244 if (time_is_after_jiffies(timeout)) {
245 mod_timer(&ba_data->session_timer, timeout);
250 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
253 * sta should be valid unless the following happens:
254 * The firmware asserts which triggers a reconfig flow, but
255 * the reconfig fails before we set the pointer to sta into
256 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
 * A-MPDU and hence the timer continues to run. Then, the
258 * timer expires and sta is NULL.
263 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
264 ieee80211_rx_ba_timer_expired(mvm_sta->vif,
265 sta->addr, ba_data->tid);
270 /* Disable aggregations for a bitmap of TIDs for a given station */
271 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
272 unsigned long disable_agg_tids,
275 struct iwl_mvm_add_sta_cmd cmd = {};
276 struct ieee80211_sta *sta;
277 struct iwl_mvm_sta *mvmsta;
281 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
284 sta_id = mvm->queue_info[queue].ra_sta_id;
288 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
290 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
295 mvmsta = iwl_mvm_sta_from_mac80211(sta);
297 mvmsta->tid_disable_agg |= disable_agg_tids;
299 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
300 cmd.sta_id = mvmsta->sta_id;
301 cmd.add_modify = STA_MODE_MODIFY;
302 cmd.modify_mask = STA_MODIFY_QUEUES;
303 if (disable_agg_tids)
304 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
306 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
307 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
308 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
312 /* Notify FW of queue removal from the STA queues */
313 status = ADD_STA_SUCCESS;
314 return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
315 iwl_mvm_add_sta_cmd_size(mvm),
319 static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
320 u16 *queueptr, u8 tid, u8 flags)
322 int queue = *queueptr;
323 struct iwl_scd_txq_cfg_cmd cmd = {
325 .action = SCD_CFG_DISABLE_QUEUE,
329 if (iwl_mvm_has_new_tx_api(mvm)) {
330 iwl_trans_txq_free(mvm->trans, queue);
331 *queueptr = IWL_MVM_INVALID_QUEUE;
336 if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
339 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
341 cmd.action = mvm->queue_info[queue].tid_bitmap ?
342 SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
343 if (cmd.action == SCD_CFG_DISABLE_QUEUE)
344 mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
346 IWL_DEBUG_TX_QUEUES(mvm,
347 "Disabling TXQ #%d tids=0x%x\n",
349 mvm->queue_info[queue].tid_bitmap);
351 /* If the queue is still enabled - nothing left to do in this func */
352 if (cmd.action == SCD_CFG_ENABLE_QUEUE)
355 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
356 cmd.tid = mvm->queue_info[queue].txq_tid;
358 /* Make sure queue info is correct even though we overwrite it */
359 WARN(mvm->queue_info[queue].tid_bitmap,
360 "TXQ #%d info out-of-sync - tids=0x%x\n",
361 queue, mvm->queue_info[queue].tid_bitmap);
363 /* If we are here - the queue is freed and we can zero out these vals */
364 mvm->queue_info[queue].tid_bitmap = 0;
367 struct iwl_mvm_txq *mvmtxq =
368 iwl_mvm_txq_from_tid(sta, tid);
370 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	/* Regardless of whether this is a reserved TXQ for a STA, mark it as no longer reserved */
374 mvm->queue_info[queue].reserved = false;
376 iwl_trans_txq_disable(mvm->trans, queue, false);
377 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
378 sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
381 IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
386 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
388 struct ieee80211_sta *sta;
389 struct iwl_mvm_sta *mvmsta;
390 unsigned long tid_bitmap;
391 unsigned long agg_tids = 0;
395 lockdep_assert_held(&mvm->mutex);
397 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
400 sta_id = mvm->queue_info[queue].ra_sta_id;
401 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
403 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
404 lockdep_is_held(&mvm->mutex));
406 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
409 mvmsta = iwl_mvm_sta_from_mac80211(sta);
411 spin_lock_bh(&mvmsta->lock);
412 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
413 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
414 agg_tids |= BIT(tid);
416 spin_unlock_bh(&mvmsta->lock);
422 * Remove a queue from a station's resources.
423 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
424 * doesn't disable the queue
426 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
428 struct ieee80211_sta *sta;
429 struct iwl_mvm_sta *mvmsta;
430 unsigned long tid_bitmap;
431 unsigned long disable_agg_tids = 0;
435 lockdep_assert_held(&mvm->mutex);
437 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
440 sta_id = mvm->queue_info[queue].ra_sta_id;
441 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
445 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
447 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
452 mvmsta = iwl_mvm_sta_from_mac80211(sta);
454 spin_lock_bh(&mvmsta->lock);
455 /* Unmap MAC queues and TIDs from this queue */
456 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
457 struct iwl_mvm_txq *mvmtxq =
458 iwl_mvm_txq_from_tid(sta, tid);
460 if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
461 disable_agg_tids |= BIT(tid);
462 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
464 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
467 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
468 spin_unlock_bh(&mvmsta->lock);
473 * The TX path may have been using this TXQ_ID from the tid_data,
474 * so make sure it's no longer running so that we can safely reuse
475 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
476 * above, but nothing guarantees we've stopped using them. Thus,
477 * without this, we could get to iwl_mvm_disable_txq() and remove
478 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
485 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
486 struct ieee80211_sta *old_sta,
489 struct iwl_mvm_sta *mvmsta;
491 unsigned long disable_agg_tids = 0;
493 u16 queue_tmp = queue;
496 lockdep_assert_held(&mvm->mutex);
498 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
501 sta_id = mvm->queue_info[queue].ra_sta_id;
502 tid = mvm->queue_info[queue].txq_tid;
504 same_sta = sta_id == new_sta_id;
506 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
507 if (WARN_ON(!mvmsta))
510 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
511 /* Disable the queue */
512 if (disable_agg_tids)
513 iwl_mvm_invalidate_sta_queue(mvm, queue,
514 disable_agg_tids, false);
516 ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
519 "Failed to free inactive queue %d (ret=%d)\n",
525 /* If TXQ is allocated to another STA, update removal in FW */
527 iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
532 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
533 unsigned long tfd_queue_mask, u8 ac)
536 u8 ac_to_queue[IEEE80211_NUM_ACS];
540 * This protects us against grabbing a queue that's being reconfigured
541 * by the inactivity checker.
543 lockdep_assert_held(&mvm->mutex);
545 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
548 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
550 /* See what ACs the existing queues for this STA have */
551 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
552 /* Only DATA queues can be shared */
553 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
554 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
557 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
561 * The queue to share is chosen only from DATA queues as follows (in
 * descending priority):
 * 1. An AC_BE queue
 * 2. Same AC queue
 * 3. Highest AC queue that is lower than new AC
 * 4. Any existing AC (there always is at least 1 DATA queue)
569 /* Priority 1: An AC_BE queue */
570 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
571 queue = ac_to_queue[IEEE80211_AC_BE];
572 /* Priority 2: Same AC queue */
573 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
574 queue = ac_to_queue[ac];
575 /* Priority 3a: If new AC is VO and VI exists - use VI */
576 else if (ac == IEEE80211_AC_VO &&
577 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
578 queue = ac_to_queue[IEEE80211_AC_VI];
579 /* Priority 3b: No BE so only AC less than the new one is BK */
580 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
581 queue = ac_to_queue[IEEE80211_AC_BK];
582 /* Priority 4a: No BE nor BK - use VI if exists */
583 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
584 queue = ac_to_queue[IEEE80211_AC_VI];
585 /* Priority 4b: No BE, BK nor VI - use VO if exists */
586 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
587 queue = ac_to_queue[IEEE80211_AC_VO];
589 /* Make sure queue found (or not) is legal */
590 if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
591 !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
592 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
593 IWL_ERR(mvm, "No DATA queues available to share\n");
601 * If a given queue has a higher AC than the TID stream that is being compared
602 * to, the queue needs to be redirected to the lower AC. This function does that
603 * in such a case, otherwise - if no redirection required - it does nothing,
604 * unless the %force param is true.
606 static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
607 int ac, int ssn, unsigned int wdg_timeout,
608 bool force, struct iwl_mvm_txq *txq)
610 struct iwl_scd_txq_cfg_cmd cmd = {
612 .action = SCD_CFG_DISABLE_QUEUE,
617 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
621 * If the AC is lower than current one - FIFO needs to be redirected to
622 * the lowest one of the streams in the queue. Check if this is needed
624 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
625 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
626 * we need to check if the numerical value of X is LARGER than of Y.
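 * For example, a BK (3) TID landing on a queue currently marked BE (2):
 * 3 > 2, so the queue's FIFO must be redirected down to BK.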
628 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
629 IWL_DEBUG_TX_QUEUES(mvm,
630 "No redirection needed on TXQ #%d\n",
635 cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
636 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
637 cmd.tid = mvm->queue_info[queue].txq_tid;
638 shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
640 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
641 queue, iwl_mvm_ac_to_tx_fifo[ac]);
643 /* Stop the queue and wait for it to empty */
646 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
648 IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
654 /* Before redirecting the queue we need to de-activate it */
655 iwl_trans_txq_disable(mvm->trans, queue, false);
656 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
658 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
661 /* Make sure the SCD wrptr is correctly set before reconfiguring */
662 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
664 /* Update the TID "owner" of the queue */
665 mvm->queue_info[queue].txq_tid = tid;
667 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
669 /* Redirect to lower AC */
670 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
671 cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
673 /* Update AC marking of the queue */
674 mvm->queue_info[queue].mac80211_ac = ac;
677 * Mark queue as shared in transport if shared
678 * Note this has to be done after queue enablement because enablement
 * can also set this value, and there is no indication there for shared queues.
683 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
686 /* Continue using the queue */
687 txq->stopped = false;
692 static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
697 lockdep_assert_held(&mvm->mutex);
699 if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
700 "max queue %d >= num_of_queues (%d)", maxq,
701 mvm->trans->trans_cfg->base_params->num_of_queues))
702 maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
704 /* This should not be hit with new TX path */
705 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
708 /* Start by looking for a free queue */
709 for (i = minq; i <= maxq; i++)
710 if (mvm->queue_info[i].tid_bitmap == 0 &&
711 mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
717 static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
718 u8 sta_id, u8 tid, unsigned int timeout)
722 if (tid == IWL_MAX_TID_COUNT) {
724 size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
725 mvm->trans->cfg->min_txq_size);
727 struct ieee80211_sta *sta;
730 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
732 /* this queue isn't used for traffic (cab_queue) */
733 if (IS_ERR_OR_NULL(sta)) {
734 size = IWL_MGMT_QUEUE_SIZE;
735 } else if (sta->he_cap.has_he) {
736 /* support for 256 ba size */
737 size = IWL_DEFAULT_QUEUE_SIZE_HE;
739 size = IWL_DEFAULT_QUEUE_SIZE;
745 /* take the min with bc tbl entries allowed */
746 size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
748 /* size needs to be power of 2 values for calculating read/write pointers */
749 size = rounddown_pow_of_two(size);
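	/* e.g. a computed size of 300 entries ends up as 256 */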
752 __le16 enable = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE);
754 queue = iwl_trans_txq_alloc(mvm->trans, enable,
755 sta_id, tid, SCD_QUEUE_CFG,
759 IWL_DEBUG_TX_QUEUES(mvm,
760 "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
761 size, sta_id, tid, queue);
763 } while (queue < 0 && size >= 16);
768 IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
774 static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
775 struct ieee80211_sta *sta, u8 ac,
778 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
779 struct iwl_mvm_txq *mvmtxq =
780 iwl_mvm_txq_from_tid(sta, tid);
781 unsigned int wdg_timeout =
782 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
785 lockdep_assert_held(&mvm->mutex);
787 IWL_DEBUG_TX_QUEUES(mvm,
788 "Allocating queue for sta %d on tid %d\n",
789 mvmsta->sta_id, tid);
790 queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
794 mvmtxq->txq_id = queue;
795 mvm->tvqm_info[queue].txq_tid = tid;
796 mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
798 IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
800 spin_lock_bh(&mvmsta->lock);
801 mvmsta->tid_data[tid].txq_id = queue;
802 spin_unlock_bh(&mvmsta->lock);
807 static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
808 struct ieee80211_sta *sta,
809 int queue, u8 sta_id, u8 tid)
811 bool enable_queue = true;
813 /* Make sure this TID isn't already enabled */
814 if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
815 IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
820 /* Update mappings and refcounts */
821 if (mvm->queue_info[queue].tid_bitmap)
822 enable_queue = false;
824 mvm->queue_info[queue].tid_bitmap |= BIT(tid);
825 mvm->queue_info[queue].ra_sta_id = sta_id;
828 if (tid != IWL_MAX_TID_COUNT)
829 mvm->queue_info[queue].mac80211_ac =
830 tid_to_mac80211_ac[tid];
832 mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
834 mvm->queue_info[queue].txq_tid = tid;
838 struct iwl_mvm_txq *mvmtxq =
839 iwl_mvm_txq_from_tid(sta, tid);
841 mvmtxq->txq_id = queue;
844 IWL_DEBUG_TX_QUEUES(mvm,
845 "Enabling TXQ #%d tids=0x%x\n",
846 queue, mvm->queue_info[queue].tid_bitmap);
851 static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
853 const struct iwl_trans_txq_scd_cfg *cfg,
854 unsigned int wdg_timeout)
856 struct iwl_scd_txq_cfg_cmd cmd = {
858 .action = SCD_CFG_ENABLE_QUEUE,
859 .window = cfg->frame_limit,
860 .sta_id = cfg->sta_id,
861 .ssn = cpu_to_le16(ssn),
862 .tx_fifo = cfg->fifo,
863 .aggregate = cfg->aggregate,
868 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
871 /* Send the enabling command if we need to */
872 if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
875 inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
878 le16_add_cpu(&cmd.ssn, 1);
880 WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
881 "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
886 static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
888 struct iwl_scd_txq_cfg_cmd cmd = {
890 .action = SCD_CFG_UPDATE_QUEUE_TID,
893 unsigned long tid_bitmap;
896 lockdep_assert_held(&mvm->mutex);
898 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
901 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
903 if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
906 /* Find any TID for queue */
907 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
909 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
911 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
913 IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
918 mvm->queue_info[queue].txq_tid = tid;
919 IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
923 static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
925 struct ieee80211_sta *sta;
926 struct iwl_mvm_sta *mvmsta;
929 unsigned long tid_bitmap;
930 unsigned int wdg_timeout;
934 /* queue sharing is disabled on new TX path */
935 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
938 lockdep_assert_held(&mvm->mutex);
940 sta_id = mvm->queue_info[queue].ra_sta_id;
941 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
943 /* Find TID for queue, and make sure it is the only one on the queue */
944 tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
945 if (tid_bitmap != BIT(tid)) {
946 IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
951 IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
954 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
955 lockdep_is_held(&mvm->mutex));
957 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
960 mvmsta = iwl_mvm_sta_from_mac80211(sta);
961 wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
963 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
965 ret = iwl_mvm_redirect_queue(mvm, queue, tid,
966 tid_to_mac80211_ac[tid], ssn,
968 iwl_mvm_txq_from_tid(sta, tid));
970 IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
974 /* If aggs should be turned back on - do it */
975 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
976 struct iwl_mvm_add_sta_cmd cmd = {0};
978 mvmsta->tid_disable_agg &= ~BIT(tid);
980 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
981 cmd.sta_id = mvmsta->sta_id;
982 cmd.add_modify = STA_MODE_MODIFY;
983 cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
984 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
985 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
987 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
988 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
990 IWL_DEBUG_TX_QUEUES(mvm,
991 "TXQ #%d is now aggregated again\n",
	/* Mark queue internally as aggregating again */
995 iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
999 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1003 * Remove inactive TIDs of a given queue.
1004 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
1007 * Returns %true if all TIDs were removed and the queue could be reused.
1009 static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
1010 struct iwl_mvm_sta *mvmsta, int queue,
1011 unsigned long tid_bitmap,
1012 unsigned long *unshare_queues,
1013 unsigned long *changetid_queues)
1017 lockdep_assert_held(&mvmsta->lock);
1018 lockdep_assert_held(&mvm->mutex);
1020 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1023 /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
1024 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1025 /* If some TFDs are still queued - don't mark TID as inactive */
1026 if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
1027 tid_bitmap &= ~BIT(tid);
1029 /* Don't mark as inactive any TID that has an active BA */
1030 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
1031 tid_bitmap &= ~BIT(tid);
1034 /* If all TIDs in the queue are inactive - return it can be reused */
1035 if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
1036 IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
1041 * If we are here, this is a shared queue and not all TIDs timed-out.
1042 * Remove the ones that did.
1044 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1047 mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
1048 mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
1050 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1053 * We need to take into account a situation in which a TXQ was
1054 * allocated to TID x, and then turned shared by adding TIDs y
1055 * and z. If TID x becomes inactive and is removed from the TXQ,
1056 * ownership must be given to one of the remaining TIDs.
1057 * This is mainly because if TID x continues - a new queue can't
1058 * be allocated for it as long as it is an owner of another TXQ.
1060 * Mark this queue in the right bitmap, we'll send the command
1061 * to the firmware later.
1063 if (!(tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
1064 set_bit(queue, changetid_queues);
1066 IWL_DEBUG_TX_QUEUES(mvm,
1067 "Removing inactive TID %d from shared Q:%d\n",
1071 IWL_DEBUG_TX_QUEUES(mvm,
1072 "TXQ #%d left with tid bitmap 0x%x\n", queue,
1073 mvm->queue_info[queue].tid_bitmap);
1076 * There may be different TIDs with the same mac queues, so make
1077 * sure all TIDs have existing corresponding mac queues enabled
1079 tid_bitmap = mvm->queue_info[queue].tid_bitmap;
1081 /* If the queue is marked as shared - "unshare" it */
1082 if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
1083 mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
1084 IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
1086 set_bit(queue, unshare_queues);
1093 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be reused.
1096 * This function is also invoked as a sort of clean-up task,
1097 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
1099 * Returns the queue number, or -ENOSPC.
1101 static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
1103 unsigned long now = jiffies;
1104 unsigned long unshare_queues = 0;
1105 unsigned long changetid_queues = 0;
1106 int i, ret, free_queue = -ENOSPC;
1107 struct ieee80211_sta *queue_owner = NULL;
1109 lockdep_assert_held(&mvm->mutex);
1111 if (iwl_mvm_has_new_tx_api(mvm))
1116 /* we skip the CMD queue below by starting at 1 */
1117 BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);
1119 for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
1120 struct ieee80211_sta *sta;
1121 struct iwl_mvm_sta *mvmsta;
1124 unsigned long inactive_tid_bitmap = 0;
1125 unsigned long queue_tid_bitmap;
1127 queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
1128 if (!queue_tid_bitmap)
1131 /* If TXQ isn't in active use anyway - nothing to do here... */
1132 if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
1133 mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
1136 /* Check to see if there are inactive TIDs on this queue */
1137 for_each_set_bit(tid, &queue_tid_bitmap,
1138 IWL_MAX_TID_COUNT + 1) {
1139 if (time_after(mvm->queue_info[i].last_frame_time[tid] +
1140 IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1143 inactive_tid_bitmap |= BIT(tid);
1146 /* If all TIDs are active - finish check on this queue */
1147 if (!inactive_tid_bitmap)
1151 * If we are here - the queue hadn't been served recently and is
1155 sta_id = mvm->queue_info[i].ra_sta_id;
1156 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1159 * If the STA doesn't exist anymore, it isn't an error. It could
1160 * be that it was removed since getting the queues, and in this
1161 * case it should've inactivated its queues anyway.
1163 if (IS_ERR_OR_NULL(sta))
1166 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1168 spin_lock_bh(&mvmsta->lock);
1169 ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
1170 inactive_tid_bitmap,
1173 if (ret && free_queue < 0) {
1177 /* only unlock sta lock - we still need the queue info lock */
1178 spin_unlock_bh(&mvmsta->lock);
	/* Reconfigure queues requiring reconfiguration */
1183 for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
1184 iwl_mvm_unshare_queue(mvm, i);
1185 for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
1186 iwl_mvm_change_queue_tid(mvm, i);
1190 if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
1191 ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
1200 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
1201 struct ieee80211_sta *sta, u8 ac, int tid)
1203 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1204 struct iwl_trans_txq_scd_cfg cfg = {
1205 .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
1206 .sta_id = mvmsta->sta_id,
1208 .frame_limit = IWL_FRAME_LIMIT,
1210 unsigned int wdg_timeout =
1211 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
1214 unsigned long disable_agg_tids = 0;
1215 enum iwl_mvm_agg_state queue_state;
1216 bool shared_queue = false, inc_ssn;
1218 unsigned long tfd_queue_mask;
1221 lockdep_assert_held(&mvm->mutex);
1223 if (iwl_mvm_has_new_tx_api(mvm))
1224 return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
1226 spin_lock_bh(&mvmsta->lock);
1227 tfd_queue_mask = mvmsta->tfd_queue_msk;
1228 ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
1229 spin_unlock_bh(&mvmsta->lock);
1231 if (tid == IWL_MAX_TID_COUNT) {
1232 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1233 IWL_MVM_DQA_MIN_MGMT_QUEUE,
1234 IWL_MVM_DQA_MAX_MGMT_QUEUE);
1235 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
1236 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
1239 /* If no such queue is found, we'll use a DATA queue instead */
1242 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
1243 (mvm->queue_info[mvmsta->reserved_queue].status ==
1244 IWL_MVM_QUEUE_RESERVED)) {
1245 queue = mvmsta->reserved_queue;
1246 mvm->queue_info[queue].reserved = true;
1247 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
1251 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1252 IWL_MVM_DQA_MIN_DATA_QUEUE,
1253 IWL_MVM_DQA_MAX_DATA_QUEUE);
1255 /* try harder - perhaps kill an inactive queue */
1256 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1259 /* No free queue - we'll have to share */
1261 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
1263 shared_queue = true;
1264 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
1269 * Mark TXQ as ready, even though it hasn't been fully configured yet,
1270 * to make sure no one else takes it.
1271 * This will allow avoiding re-acquiring the lock at the end of the
1272 * configuration. On error we'll mark it back as free.
1274 if (queue > 0 && !shared_queue)
1275 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
1277 /* This shouldn't happen - out of queues */
1278 if (WARN_ON(queue <= 0)) {
1279 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
1285 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
 * but for configuring the SCD to send A-MPDUs we need to mark the queue as aggregated.
1288 * Mark all DATA queues as allowing to be aggregated at some point
1290 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1291 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1293 IWL_DEBUG_TX_QUEUES(mvm,
1294 "Allocating %squeue #%d to sta %d on tid %d\n",
1295 shared_queue ? "shared " : "", queue,
1296 mvmsta->sta_id, tid);
1299 /* Disable any open aggs on this queue */
1300 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
1302 if (disable_agg_tids) {
1303 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
1305 iwl_mvm_invalidate_sta_queue(mvm, queue,
1306 disable_agg_tids, false);
1310 inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
1313 * Mark queue as shared in transport if shared
1314 * Note this has to be done after queue enablement because enablement
 * can also set this value, and there is no indication there for shared queues.
1319 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
1321 spin_lock_bh(&mvmsta->lock);
1323 * This looks racy, but it is not. We have only one packet for
1324 * this ra/tid in our Tx path since we stop the Qdisc when we
1325 * need to allocate a new TFD queue.
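	 * If the transport bumped the SSN on enable (inc_ssn), the sequence
	 * number lives in bits 4..15 of seq_number, so adding 0x10 below
	 * advances it by exactly one MPDU to stay in sync.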
1328 mvmsta->tid_data[tid].seq_number += 0x10;
1329 ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
1331 mvmsta->tid_data[tid].txq_id = queue;
1332 mvmsta->tfd_queue_msk |= BIT(queue);
1333 queue_state = mvmsta->tid_data[tid].state;
1335 if (mvmsta->reserved_queue == queue)
1336 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
1337 spin_unlock_bh(&mvmsta->lock);
1339 if (!shared_queue) {
1340 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
1344 /* If we need to re-enable aggregations... */
1345 if (queue_state == IWL_AGG_ON) {
1346 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1351 /* Redirect queue, if needed */
1352 ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
1354 iwl_mvm_txq_from_tid(sta, tid));
1363 iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);
1368 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1370 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
1373 mutex_lock(&mvm->mutex);
1375 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1377 while (!list_empty(&mvm->add_stream_txqs)) {
1378 struct iwl_mvm_txq *mvmtxq;
1379 struct ieee80211_txq *txq;
1382 mvmtxq = list_first_entry(&mvm->add_stream_txqs,
1383 struct iwl_mvm_txq, list);
1385 txq = container_of((void *)mvmtxq, struct ieee80211_txq,
1388 if (tid == IEEE80211_NUM_TIDS)
1389 tid = IWL_MAX_TID_COUNT;
1392 * We can't really do much here, but if this fails we can't
1393 * transmit anyway - so just don't transmit the frame etc.
1394 * and let them back up ... we've tried our best to allocate
1395 * a queue in the function itself.
1397 if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
1398 list_del_init(&mvmtxq->list);
1402 list_del_init(&mvmtxq->list);
1404 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1408 mutex_unlock(&mvm->mutex);
1411 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
1412 struct ieee80211_sta *sta,
1413 enum nl80211_iftype vif_type)
1415 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1418 /* queue reserving is disabled on new TX path */
1419 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1422 /* run the general cleanup/unsharing of queues */
1423 iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
1425 /* Make sure we have free resources for this STA */
1426 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
1427 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
1428 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
1429 IWL_MVM_QUEUE_FREE))
1430 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
1432 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
1433 IWL_MVM_DQA_MIN_DATA_QUEUE,
1434 IWL_MVM_DQA_MAX_DATA_QUEUE);
1436 /* try again - this time kick out a queue if needed */
1437 queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
1439 IWL_ERR(mvm, "No available queues for new station\n");
1443 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
1445 mvmsta->reserved_queue = queue;
1447 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
1448 queue, mvmsta->sta_id);
1454 * In DQA mode, after a HW restart the queues should be allocated as before, in
1455 * order to avoid race conditions when there are shared queues. This function
1456 * does the re-mapping and queue allocation.
1458 * Note that re-enabling aggregations isn't done in this function.
1460 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
1461 struct ieee80211_sta *sta)
1463 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1465 iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
1467 struct iwl_trans_txq_scd_cfg cfg = {
1468 .sta_id = mvm_sta->sta_id,
1469 .frame_limit = IWL_FRAME_LIMIT,
1472 /* Make sure reserved queue is still marked as such (if allocated) */
1473 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
1474 mvm->queue_info[mvm_sta->reserved_queue].status =
1475 IWL_MVM_QUEUE_RESERVED;
1477 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1478 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
1479 int txq_id = tid_data->txq_id;
1482 if (txq_id == IWL_MVM_INVALID_QUEUE)
1485 ac = tid_to_mac80211_ac[i];
1487 if (iwl_mvm_has_new_tx_api(mvm)) {
1488 IWL_DEBUG_TX_QUEUES(mvm,
1489 "Re-mapping sta %d tid %d\n",
1490 mvm_sta->sta_id, i);
1491 txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
1494 * on failures, just set it to IWL_MVM_INVALID_QUEUE
 * to try again later, we have no other good way of failing here.
1499 txq_id = IWL_MVM_INVALID_QUEUE;
1500 tid_data->txq_id = txq_id;
1503 * Since we don't set the seq number after reset, and HW
1504 * sets it now, FW reset will cause the seq num to start
1505 * at 0 again, so driver will need to update it
 * internally as well, so it keeps in sync with the real value.
1508 tid_data->seq_number = 0;
1510 u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
1513 cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
1514 cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
1516 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
1518 IWL_DEBUG_TX_QUEUES(mvm,
1519 "Re-mapping sta %d tid %d to queue %d\n",
1520 mvm_sta->sta_id, i, txq_id);
1522 iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
1523 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
1528 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1529 struct iwl_mvm_int_sta *sta,
1531 u16 mac_id, u16 color)
1533 struct iwl_mvm_add_sta_cmd cmd;
1535 u32 status = ADD_STA_SUCCESS;
1537 lockdep_assert_held(&mvm->mutex);
1539 memset(&cmd, 0, sizeof(cmd));
1540 cmd.sta_id = sta->sta_id;
1542 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
1543 sta->type == IWL_STA_AUX_ACTIVITY)
1544 cmd.mac_id_n_color = cpu_to_le32(mac_id);
1546 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
1549 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
1550 cmd.station_type = sta->type;
1552 if (!iwl_mvm_has_new_tx_api(mvm))
1553 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
1554 cmd.tid_disable_tx = cpu_to_le16(0xffff);
1557 memcpy(cmd.addr, addr, ETH_ALEN);
1559 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1560 iwl_mvm_add_sta_cmd_size(mvm),
1565 switch (status & IWL_ADD_STA_STATUS_MASK) {
1566 case ADD_STA_SUCCESS:
1567 IWL_DEBUG_INFO(mvm, "Internal station added.\n");
1571 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
1578 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1579 struct ieee80211_vif *vif,
1580 struct ieee80211_sta *sta)
1582 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1583 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1584 struct iwl_mvm_rxq_dup_data *dup_data;
1586 bool sta_update = false;
1587 unsigned int sta_flags = 0;
1589 lockdep_assert_held(&mvm->mutex);
1591 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1592 sta_id = iwl_mvm_find_free_sta_id(mvm,
1593 ieee80211_vif_type_p2p(vif));
1595 sta_id = mvm_sta->sta_id;
1597 if (sta_id == IWL_MVM_INVALID_STA)
1600 spin_lock_init(&mvm_sta->lock);
1602 /* if this is a HW restart re-alloc existing queues */
1603 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1604 struct iwl_mvm_int_sta tmp_sta = {
1606 .type = mvm_sta->sta_type,
1610 * First add an empty station since allocating
1611 * a queue requires a valid station
1613 ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1614 mvmvif->id, mvmvif->color);
1618 iwl_mvm_realloc_queues_after_restart(mvm, sta);
1620 sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1624 mvm_sta->sta_id = sta_id;
1625 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1628 if (!mvm->trans->trans_cfg->gen2)
1629 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1631 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1632 mvm_sta->tx_protection = 0;
1633 mvm_sta->tt_tx_protection = false;
1634 mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1636 /* HW restart, don't assume the memory has been zeroed */
1637 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1638 mvm_sta->tfd_queue_msk = 0;
1640 /* for HW restart - reset everything but the sequence number */
1641 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1642 u16 seq = mvm_sta->tid_data[i].seq_number;
1643 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1644 mvm_sta->tid_data[i].seq_number = seq;
1647 * Mark all queues for this STA as unallocated and defer TX
1648 * frames until the queue is allocated
1650 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1653 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1654 struct iwl_mvm_txq *mvmtxq =
1655 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1657 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1658 INIT_LIST_HEAD(&mvmtxq->list);
1659 atomic_set(&mvmtxq->tx_request, 0);
1662 mvm_sta->agg_tids = 0;
1664 if (iwl_mvm_has_new_rx_api(mvm) &&
1665 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1668 dup_data = kcalloc(mvm->trans->num_rx_queues,
1669 sizeof(*dup_data), GFP_KERNEL);
1673 * Initialize all the last_seq values to 0xffff which can never
1674 * compare equal to the frame's seq_ctrl in the check in
1675 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1676 * number and fragmented packets don't reach that function.
1678 * This thus allows receiving a packet with seqno 0 and the
1679 * retry bit set as the very first packet on a new TID.
1681 for (q = 0; q < mvm->trans->num_rx_queues; q++)
1682 memset(dup_data[q].last_seq, 0xff,
1683 sizeof(dup_data[q].last_seq));
1684 mvm_sta->dup_data = dup_data;
1687 if (!iwl_mvm_has_new_tx_api(mvm)) {
1688 ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1689 ieee80211_vif_type_p2p(vif));
1695 * if rs is registered with mac80211, then "add station" will be handled
1696 * via the corresponding ops, otherwise need to notify rate scaling here
1698 if (iwl_mvm_has_tlc_offload(mvm))
1699 iwl_mvm_rs_add_sta(mvm, mvm_sta);
1701 spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1703 iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1706 ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1710 if (vif->type == NL80211_IFTYPE_STATION) {
1712 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1713 mvmvif->ap_sta_id = sta_id;
1715 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1719 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1727 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1730 struct iwl_mvm_add_sta_cmd cmd = {};
1734 lockdep_assert_held(&mvm->mutex);
1736 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1737 cmd.sta_id = mvmsta->sta_id;
1738 cmd.add_modify = STA_MODE_MODIFY;
1739 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1740 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1742 status = ADD_STA_SUCCESS;
1743 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1744 iwl_mvm_add_sta_cmd_size(mvm),
1749 switch (status & IWL_ADD_STA_STATUS_MASK) {
1750 case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1756 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver
 * (sanity check only).
1769 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1771 struct ieee80211_sta *sta;
1772 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1777 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1778 lockdep_is_held(&mvm->mutex));
1780 /* Note: internal stations are marked as error values */
1782 IWL_ERR(mvm, "Invalid station id\n");
1786 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1787 sizeof(rm_sta_cmd), &rm_sta_cmd);
1789 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1796 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1797 struct ieee80211_vif *vif,
1798 struct ieee80211_sta *sta)
1800 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1803 lockdep_assert_held(&mvm->mutex);
1805 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1806 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1809 iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
1811 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1814 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1815 struct iwl_mvm_txq *mvmtxq =
1816 iwl_mvm_txq_from_mac80211(sta->txq[i]);
1818 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1822 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1823 struct iwl_mvm_sta *mvm_sta)
1827 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1831 spin_lock_bh(&mvm_sta->lock);
1832 txq_id = mvm_sta->tid_data[i].txq_id;
1833 spin_unlock_bh(&mvm_sta->lock);
1835 if (txq_id == IWL_MVM_INVALID_QUEUE)
1838 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1846 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1847 struct ieee80211_vif *vif,
1848 struct ieee80211_sta *sta)
1850 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1851 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1852 u8 sta_id = mvm_sta->sta_id;
1855 lockdep_assert_held(&mvm->mutex);
1857 if (iwl_mvm_has_new_rx_api(mvm))
1858 kfree(mvm_sta->dup_data);
1860 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1864 /* flush its queues here since we are freeing mvm_sta */
1865 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
1868 if (iwl_mvm_has_new_tx_api(mvm)) {
1869 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1871 u32 q_mask = mvm_sta->tfd_queue_msk;
1873 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1879 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1881 iwl_mvm_disable_sta_queues(mvm, vif, sta);
1883 /* If there is a TXQ still marked as reserved - free it */
1884 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1885 u8 reserved_txq = mvm_sta->reserved_queue;
1886 enum iwl_mvm_queue_status *status;
1889 * If no traffic has gone through the reserved TXQ - it
1890 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1891 * should be manually marked as free again
1893 status = &mvm->queue_info[reserved_txq].status;
1894 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1895 (*status != IWL_MVM_QUEUE_FREE),
1896 "sta_id %d reserved txq %d status %d",
1897 sta_id, reserved_txq, *status))
1900 *status = IWL_MVM_QUEUE_FREE;
1903 if (vif->type == NL80211_IFTYPE_STATION &&
1904 mvmvif->ap_sta_id == sta_id) {
1905 /* if associated - we can't remove the AP STA now */
1906 if (vif->bss_conf.assoc)
1909 /* unassoc - go ahead - remove the AP STA now */
1910 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1914 * This shouldn't happen - the TDLS channel switch should be canceled
1915 * before the STA is removed.
1917 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1918 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1919 cancel_delayed_work(&mvm->tdls_cs.dwork);
1923 * Make sure that the tx response code sees the station as -EBUSY and
1924 * calls the drain worker.
1926 spin_lock_bh(&mvm_sta->lock);
1927 spin_unlock_bh(&mvm_sta->lock);
1929 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1930 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1935 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1936 struct ieee80211_vif *vif,
1939 int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
1941 lockdep_assert_held(&mvm->mutex);
1943 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
1947 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
1948 struct iwl_mvm_int_sta *sta,
1949 u32 qmask, enum nl80211_iftype iftype,
1950 enum iwl_sta_type type)
1952 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1953 sta->sta_id == IWL_MVM_INVALID_STA) {
1954 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
1955 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
1959 sta->tfd_queue_msk = qmask;
1962 /* put a non-NULL value so iterating over the stations won't stop */
1963 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
1967 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
1969 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
1970 memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
1971 sta->sta_id = IWL_MVM_INVALID_STA;
1974 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
1977 unsigned int wdg_timeout =
1978 mvm->trans->trans_cfg->base_params->wd_timeout;
1979 struct iwl_trans_txq_scd_cfg cfg = {
1982 .tid = IWL_MAX_TID_COUNT,
1984 .frame_limit = IWL_FRAME_LIMIT,
1987 WARN_ON(iwl_mvm_has_new_tx_api(mvm));
1989 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
1992 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
1994 unsigned int wdg_timeout =
1995 mvm->trans->trans_cfg->base_params->wd_timeout;
1997 WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
1999 return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2003 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2004 int maccolor, u8 *addr,
2005 struct iwl_mvm_int_sta *sta,
2006 u16 *queue, int fifo)
2010 /* Map queue to fifo - needs to happen before adding station */
2011 if (!iwl_mvm_has_new_tx_api(mvm))
2012 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2014 ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2016 if (!iwl_mvm_has_new_tx_api(mvm))
2017 iwl_mvm_disable_txq(mvm, NULL, queue,
2018 IWL_MAX_TID_COUNT, 0);
	 * For 22000 firmware and onward we cannot add a queue to a station unknown
	 * to the firmware, so enable the queue here - after the station was added.
2026 if (iwl_mvm_has_new_tx_api(mvm)) {
2029 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2031 iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2041 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2045 lockdep_assert_held(&mvm->mutex);
2047 /* Allocate aux station and assign to it the aux queue */
2048 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2049 NL80211_IFTYPE_UNSPECIFIED,
2050 IWL_STA_AUX_ACTIVITY);
	 * In CDB NICs we need to specify which lmac to use for aux activity;
	 * the mac_id argument is used as a place to pass lmac_id to the function.
2058 ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2059 &mvm->aux_sta, &mvm->aux_queue,
2060 IWL_MVM_TX_FIFO_MCAST);
2062 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2069 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2071 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2073 lockdep_assert_held(&mvm->mutex);
2075 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2076 NULL, &mvm->snif_sta,
2078 IWL_MVM_TX_FIFO_BE);
2081 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2085 lockdep_assert_held(&mvm->mutex);
2087 if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2090 iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
2091 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2093 IWL_WARN(mvm, "Failed sending remove station\n");
2098 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2102 lockdep_assert_held(&mvm->mutex);
2104 if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2107 iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
2108 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2110 IWL_WARN(mvm, "Failed sending remove station\n");
2111 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2116 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2118 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2122 * Send the add station command for the vif's broadcast station.
2123 * Assumes that the station was already allocated.
2125 * @mvm: the mvm component
2126 * @vif: the interface to which the broadcast station is added
2127 * @bsta: the broadcast station to add.
2129 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2131 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2132 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2133 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2134 const u8 *baddr = _baddr;
2137 unsigned int wdg_timeout =
2138 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2139 struct iwl_trans_txq_scd_cfg cfg = {
2140 .fifo = IWL_MVM_TX_FIFO_VO,
2141 .sta_id = mvmvif->bcast_sta.sta_id,
2142 .tid = IWL_MAX_TID_COUNT,
2144 .frame_limit = IWL_FRAME_LIMIT,
2147 lockdep_assert_held(&mvm->mutex);
2149 if (!iwl_mvm_has_new_tx_api(mvm)) {
2150 if (vif->type == NL80211_IFTYPE_AP ||
2151 vif->type == NL80211_IFTYPE_ADHOC) {
2152 queue = mvm->probe_queue;
2153 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2154 queue = mvm->p2p_dev_queue;
2156 WARN(1, "Missing required TXQ for adding bcast STA\n");
2160 bsta->tfd_queue_msk |= BIT(queue);
2162 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2165 if (vif->type == NL80211_IFTYPE_ADHOC)
2166 baddr = vif->bss_conf.bssid;
2168 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2171 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2172 mvmvif->id, mvmvif->color);
	 * For 22000 firmware and onward we cannot add a queue to a station unknown
	 * to the firmware, so enable the queue here - after the station was added.
2180 if (iwl_mvm_has_new_tx_api(mvm)) {
2181 queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2185 iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2189 if (vif->type == NL80211_IFTYPE_AP ||
2190 vif->type == NL80211_IFTYPE_ADHOC)
2191 mvm->probe_queue = queue;
2192 else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2193 mvm->p2p_dev_queue = queue;
2199 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2200 struct ieee80211_vif *vif)
2202 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2203 u16 *queueptr, queue;
2205 lockdep_assert_held(&mvm->mutex);
2207 iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2209 switch (vif->type) {
2210 case NL80211_IFTYPE_AP:
2211 case NL80211_IFTYPE_ADHOC:
2212 queueptr = &mvm->probe_queue;
2214 case NL80211_IFTYPE_P2P_DEVICE:
2215 queueptr = &mvm->p2p_dev_queue;
2218 WARN(1, "Can't free bcast queue on vif type %d\n",
2224 iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
2225 if (iwl_mvm_has_new_tx_api(mvm))
2228 WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2229 mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2232 /* Send the FW a request to remove the station from its internal data
2233 * structures, but DO NOT remove the entry from the local data structures. */
2234 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2236 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2239 lockdep_assert_held(&mvm->mutex);
2241 iwl_mvm_free_bcast_sta_queues(mvm, vif);
2243 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2245 IWL_WARN(mvm, "Failed sending remove station\n");
2249 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2251 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2253 lockdep_assert_held(&mvm->mutex);
2255 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2256 ieee80211_vif_type_p2p(vif),
2257 IWL_STA_GENERAL_PURPOSE);
2260 /* Allocate a new station entry for the broadcast station to the given vif,
2261 * and send it to the FW.
2262 * Note that each P2P mac should have its own broadcast station.
2264 * @mvm: the mvm component
2265 * @vif: the interface to which the broadcast station is added
2266 */
2267 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2269 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2270 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2273 lockdep_assert_held(&mvm->mutex);
2275 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2279 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2282 iwl_mvm_dealloc_int_sta(mvm, bsta);
2287 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2289 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2291 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2295 * Send the FW a request to remove the station from its internal data
2296 * structures, and in addition remove it from the local data structure.
2298 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2302 lockdep_assert_held(&mvm->mutex);
2304 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2306 iwl_mvm_dealloc_bcast_sta(mvm, vif);
2312 * Allocate a new station entry for the multicast station to the given vif,
2313 * and send it to the FW.
2314 * Note that each AP/GO mac should have its own multicast station.
2316 * @mvm: the mvm component
2317 * @vif: the interface to which the multicast station is added
2319 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2321 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2322 struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2323 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2324 const u8 *maddr = _maddr;
2325 struct iwl_trans_txq_scd_cfg cfg = {
2326 .fifo = vif->type == NL80211_IFTYPE_AP ?
2327 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2328 .sta_id = msta->sta_id,
2331 .frame_limit = IWL_FRAME_LIMIT,
2333 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2336 lockdep_assert_held(&mvm->mutex);
2338 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2339 vif->type != NL80211_IFTYPE_ADHOC))
2343 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2344 * invalid, so make sure we use the queue we want.
2345 * Note that this is done here as we want to avoid making DQA
2346 * changes in the mac80211 layer.
2348 if (vif->type == NL80211_IFTYPE_ADHOC)
2349 mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2352 * While in previous FWs we had to exclude the cab queue from the TFD queue
2353 * mask, now it is needed like any other queue.
2355 if (!iwl_mvm_has_new_tx_api(mvm) &&
2356 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2357 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2359 msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2361 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2362 mvmvif->id, mvmvif->color);
2367 * Enable cab queue after the ADD_STA command is sent.
2368 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
2369 * command with unknown station id, and for FW that doesn't support
2370 * station API since the cab queue is not included in the tfd_queue_mask.
2373 if (iwl_mvm_has_new_tx_api(mvm)) {
2374 int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2381 mvmvif->cab_queue = queue;
2382 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2383 IWL_UCODE_TLV_API_STA_TYPE))
2384 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2389 iwl_mvm_dealloc_int_sta(mvm, msta);
2393 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2394 struct ieee80211_key_conf *keyconf,
2398 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2399 struct iwl_mvm_add_sta_key_cmd cmd;
2401 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2402 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2407 /* This is a valid situation for GTK removal */
2408 if (sta_id == IWL_MVM_INVALID_STA)
2411 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2412 STA_KEY_FLG_KEYID_MSK);
2413 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2414 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2417 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2420 * The fields assigned here are in the same location at the start
2421 * of the command, so we can do this union trick.
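*
* For illustration only (an assumption about the layout, consistent with the
* union members declared above): both command versions are expected to begin
* with the same common header, so an assignment such as
*	u.cmd.common.sta_id = sta_id;
* lands at the same offset whether the buffer is later read as cmd or as
* cmd_v1; only the trailing, version-specific fields differ.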
2423 u.cmd.common.key_flags = key_flags;
2424 u.cmd.common.key_offset = keyconf->hw_key_idx;
2425 u.cmd.common.sta_id = sta_id;
2427 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2429 status = ADD_STA_SUCCESS;
2430 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2434 case ADD_STA_SUCCESS:
2435 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2439 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2447 * Send the FW a request to remove the station from its internal data
2448 * structures, and in addition remove it from the local data structure.
2450 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2452 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2455 lockdep_assert_held(&mvm->mutex);
2457 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2459 iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);
2461 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2463 IWL_WARN(mvm, "Failed sending remove station\n");
2468 #define IWL_MAX_RX_BA_SESSIONS 16
2470 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2472 struct iwl_mvm_delba_data notif = {
2476 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2477 ¬if, sizeof(notif));
2480 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2481 struct iwl_mvm_baid_data *data)
2485 iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2487 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2489 struct iwl_mvm_reorder_buffer *reorder_buf =
2490 &data->reorder_buf[i];
2491 struct iwl_mvm_reorder_buf_entry *entries =
2492 &data->entries[i * data->entries_per_queue];
2494 spin_lock_bh(&reorder_buf->lock);
2495 if (likely(!reorder_buf->num_stored)) {
2496 spin_unlock_bh(&reorder_buf->lock);
2501 * This shouldn't happen in regular DELBA since the internal
2502 * delBA notification should trigger a release of all frames in
2503 * the reorder buffer.
2507 for (j = 0; j < reorder_buf->buf_size; j++)
2508 __skb_queue_purge(&entries[j].e.frames);
2510 * Prevent timer re-arm. This prevents a very far-fetched case
2511 * where we timed out on the notification. There may be prior
2512 * RX frames pending in the RX queue before the notification
2513 * that might get processed between now and the actual deletion
2514 * and we would re-arm the timer although we are deleting the reorder buffer.
2517 reorder_buf->removed = true;
2518 spin_unlock_bh(&reorder_buf->lock);
2519 del_timer_sync(&reorder_buf->reorder_timer);
2523 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2524 struct iwl_mvm_baid_data *data,
2525 u16 ssn, u16 buf_size)
2529 for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2530 struct iwl_mvm_reorder_buffer *reorder_buf =
2531 &data->reorder_buf[i];
2532 struct iwl_mvm_reorder_buf_entry *entries =
2533 &data->entries[i * data->entries_per_queue];
2536 reorder_buf->num_stored = 0;
2537 reorder_buf->head_sn = ssn;
2538 reorder_buf->buf_size = buf_size;
2539 /* rx reorder timer */
2540 timer_setup(&reorder_buf->reorder_timer,
2541 iwl_mvm_reorder_timer_expired, 0);
2542 spin_lock_init(&reorder_buf->lock);
2543 reorder_buf->mvm = mvm;
2544 reorder_buf->queue = i;
2545 reorder_buf->valid = false;
2546 for (j = 0; j < reorder_buf->buf_size; j++)
2547 __skb_queue_head_init(&entries[j].e.frames);
2551 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2552 bool start, int tid, u16 ssn, u16 buf_size)
2554 struct iwl_mvm_add_sta_cmd cmd = {
2555 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2556 .sta_id = mvm_sta->sta_id,
2557 .add_modify = STA_MODE_MODIFY,
2563 cmd.add_immediate_ba_tid = tid;
2564 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2565 cmd.rx_ba_window = cpu_to_le16(buf_size);
2566 cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2568 cmd.remove_immediate_ba_tid = tid;
2569 cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2572 status = ADD_STA_SUCCESS;
2573 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2574 iwl_mvm_add_sta_cmd_size(mvm),
2579 switch (status & IWL_ADD_STA_STATUS_MASK) {
2580 case ADD_STA_SUCCESS:
2581 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2582 start ? "start" : "stopp");
2583 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2584 !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2586 return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2587 case ADD_STA_IMMEDIATE_BA_FAILURE:
2588 IWL_WARN(mvm, "RX BA Session refused by fw\n");
2591 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2592 start ? "start" : "stopp", status);
2597 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2598 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2600 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2601 struct iwl_mvm_baid_data *baid_data = NULL;
2604 lockdep_assert_held(&mvm->mutex);
2606 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
2607 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2611 if (iwl_mvm_has_new_rx_api(mvm) && start) {
2612 u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2614 /* sparse doesn't like the __align() so don't check */
2617 * The division below will be OK if either the cache line size
2618 * can be divided by the entry size (ALIGN will round up) or if
2619 * the entry size can be divided by the cache line size, in
2620 * which case the ALIGN() will do nothing.
2622 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2623 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2627 * Upward align the reorder buffer size to fill an entire cache
2628 * line for each queue, to avoid sharing cache lines between queues.
2631 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2634 * Allocate here so if allocation fails we can bail out early
2635 * before starting the BA session in the firmware
2637 baid_data = kzalloc(sizeof(*baid_data) +
2638 mvm->trans->num_rx_queues *
2645 * This division is why we need the above BUILD_BUG_ON();
2646 * if that doesn't hold, this will not be right.
2648 baid_data->entries_per_queue =
2649 reorder_buf_size / sizeof(baid_data->entries[0]);
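/*
* Worked example (the sizes here are illustrative, not the real structure
* sizes): with sizeof(entries[0]) == 16, SMP_CACHE_BYTES == 64 and
* buf_size == 10, the raw size is 160 bytes; ALIGN() rounds it up to 192 so
* every queue's slice starts on its own cache line, and entries_per_queue
* becomes 192 / 16 == 12.
*/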
2652 baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size);
2659 mvm->rx_ba_sessions++;
2661 if (!iwl_mvm_has_new_rx_api(mvm))
2664 baid_data->baid = baid;
2665 baid_data->timeout = timeout;
2666 baid_data->last_rx = jiffies;
2667 baid_data->rcu_ptr = &mvm->baid_map[baid];
2668 timer_setup(&baid_data->session_timer,
2669 iwl_mvm_rx_agg_session_expired, 0);
2670 baid_data->mvm = mvm;
2671 baid_data->tid = tid;
2672 baid_data->sta_id = mvm_sta->sta_id;
2674 mvm_sta->tid_to_baid[tid] = baid;
2676 mod_timer(&baid_data->session_timer,
2677 TU_TO_EXP_TIME(timeout * 2));
2679 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2681 * protect the BA data with RCU to cover a case where our
2682 * internal RX sync mechanism times out (not that it's
2683 * supposed to happen) and we will free the session data while
2684 * RX is being processed in parallel
2686 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2687 mvm_sta->sta_id, tid, baid);
2688 WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2689 rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2691 baid = mvm_sta->tid_to_baid[tid];
2693 if (mvm->rx_ba_sessions > 0)
2694 /* check that restart flow didn't zero the counter */
2695 mvm->rx_ba_sessions--;
2696 if (!iwl_mvm_has_new_rx_api(mvm))
2699 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2702 baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2703 if (WARN_ON(!baid_data))
2706 /* synchronize all rx queues so we can safely delete */
2707 iwl_mvm_free_reorder(mvm, baid_data);
2708 del_timer_sync(&baid_data->session_timer);
2709 RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2710 kfree_rcu(baid_data, rcu_head);
2711 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2714 * After we've deleted it, do another queue sync
2715 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2716 * running it won't find a new session in the old
2717 * BAID. It can find the NULL pointer for the BAID,
2718 * but we must not have it find a different session.
2720 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2730 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2731 int tid, u8 queue, bool start)
2733 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2734 struct iwl_mvm_add_sta_cmd cmd = {};
2738 lockdep_assert_held(&mvm->mutex);
2741 mvm_sta->tfd_queue_msk |= BIT(queue);
2742 mvm_sta->tid_disable_agg &= ~BIT(tid);
2744 /* In DQA-mode the queue isn't removed on agg termination */
2745 mvm_sta->tid_disable_agg |= BIT(tid);
2748 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2749 cmd.sta_id = mvm_sta->sta_id;
2750 cmd.add_modify = STA_MODE_MODIFY;
2751 if (!iwl_mvm_has_new_tx_api(mvm))
2752 cmd.modify_mask = STA_MODIFY_QUEUES;
2753 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2754 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2755 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2757 status = ADD_STA_SUCCESS;
2758 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2759 iwl_mvm_add_sta_cmd_size(mvm),
2764 switch (status & IWL_ADD_STA_STATUS_MASK) {
2765 case ADD_STA_SUCCESS:
2769 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2770 start ? "start" : "stopp", status);
2777 const u8 tid_to_mac80211_ac[] = {
2786 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2789 static const u8 tid_to_ucode_ac[] = {
2800 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2801 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2803 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2804 struct iwl_mvm_tid_data *tid_data;
2809 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2812 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2813 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2815 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2816 mvmsta->tid_data[tid].state);
2820 lockdep_assert_held(&mvm->mutex);
2822 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2823 iwl_mvm_has_new_tx_api(mvm)) {
2824 u8 ac = tid_to_mac80211_ac[tid];
2826 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2831 spin_lock_bh(&mvmsta->lock);
2834 * Note the possible cases:
2835 * 1. An enabled TXQ - TXQ needs to become agg'ed
2836 * 2. The TXQ hasn't yet been enabled, so find a free one and mark it as reserved
2839 txq_id = mvmsta->tid_data[tid].txq_id;
2840 if (txq_id == IWL_MVM_INVALID_QUEUE) {
2841 ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2842 IWL_MVM_DQA_MIN_DATA_QUEUE,
2843 IWL_MVM_DQA_MAX_DATA_QUEUE);
2845 IWL_ERR(mvm, "Failed to allocate agg queue\n");
2851 /* TXQ hasn't yet been enabled, so mark it only as reserved */
2852 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2853 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2855 IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
2856 tid, IWL_MAX_HW_QUEUES - 1);
2859 } else if (unlikely(mvm->queue_info[txq_id].status ==
2860 IWL_MVM_QUEUE_SHARED)) {
2862 IWL_DEBUG_TX_QUEUES(mvm,
2863 "Can't start tid %d agg on shared queue!\n",
2868 IWL_DEBUG_TX_QUEUES(mvm,
2869 "AGG for tid %d will be on queue #%d\n",
2872 tid_data = &mvmsta->tid_data[tid];
2873 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2874 tid_data->txq_id = txq_id;
2875 *ssn = tid_data->ssn;
2877 IWL_DEBUG_TX_QUEUES(mvm,
2878 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
2879 mvmsta->sta_id, tid, txq_id, tid_data->ssn,
2880 tid_data->next_reclaimed);
2883 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
2884 * to align the wrap around of ssn so we compare relevant values.
2886 normalized_ssn = tid_data->ssn;
2887 if (mvm->trans->trans_cfg->gen2)
2888 normalized_ssn &= 0xff;
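/*
* Example of the normalization above (illustrative values): with
* ssn == 0x105 and an 8-bit next_reclaimed of 0x05, the masked
* normalized_ssn is 0x05, the two compare equal, and the aggregation
* can be started immediately.
*/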
2890 if (normalized_ssn == tid_data->next_reclaimed) {
2891 tid_data->state = IWL_AGG_STARTING;
2892 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
2894 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
2895 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
2899 spin_unlock_bh(&mvmsta->lock);
2904 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2905 struct ieee80211_sta *sta, u16 tid, u16 buf_size,
2908 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2909 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2910 unsigned int wdg_timeout =
2911 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
2913 bool alloc_queue = true;
2914 enum iwl_mvm_queue_status queue_status;
2917 struct iwl_trans_txq_scd_cfg cfg = {
2918 .sta_id = mvmsta->sta_id,
2920 .frame_limit = buf_size,
2925 * When FW supports TLC_OFFLOAD, it also implements the Tx aggregation
2926 * manager, so this function should never be called in this case.
2928 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
2931 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
2932 != IWL_MAX_TID_COUNT);
2934 spin_lock_bh(&mvmsta->lock);
2935 ssn = tid_data->ssn;
2936 queue = tid_data->txq_id;
2937 tid_data->state = IWL_AGG_ON;
2938 mvmsta->agg_tids |= BIT(tid);
2939 tid_data->ssn = 0xffff;
2940 tid_data->amsdu_in_ampdu_allowed = amsdu;
2941 spin_unlock_bh(&mvmsta->lock);
2943 if (iwl_mvm_has_new_tx_api(mvm)) {
2945 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
2946 * would have failed, so if we are here there is no need to allocate it.
2948 * However, if the aggregation size is different from the default
2949 * size, the scheduler should be reconfigured.
2950 * We cannot do this with the new TX API, so return unsupported
2951 * for now, until it is offloaded to firmware.
2952 * Note that if SCD default value changes - this condition
2953 * should be updated as well.
2955 if (buf_size < IWL_FRAME_LIMIT)
2958 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
2964 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
2966 queue_status = mvm->queue_info[queue].status;
2968 /* Maybe there is no need to even alloc a queue... */
2969 if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
2970 alloc_queue = false;
2973 * Only reconfig the SCD for the queue if the window size has
2974 * changed from the current one (become smaller)
2976 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
2978 * If reconfiguring an existing queue, it first must be drained.
2981 ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
2985 "Error draining queue before reconfig\n");
2989 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
2990 mvmsta->sta_id, tid,
2994 "Error reconfiguring TXQ #%d\n", queue);
3000 iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3003 /* Send ADD_STA command to enable aggs only if the queue isn't shared */
3004 if (queue_status != IWL_MVM_QUEUE_SHARED) {
3005 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3010 /* No need to mark as reserved */
3011 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3015 * Even though in theory the peer could have different
3016 * aggregation reorder buffer sizes for different sessions,
3017 * our ucode doesn't allow for that and has a global limit
3018 * for each station. Therefore, use the minimum of all the
3019 * aggregation sessions and our default value.
3021 mvmsta->max_agg_bufsize =
3022 min(mvmsta->max_agg_bufsize, buf_size);
3023 mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3025 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3028 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3031 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3032 struct iwl_mvm_sta *mvmsta,
3033 struct iwl_mvm_tid_data *tid_data)
3035 u16 txq_id = tid_data->txq_id;
3037 lockdep_assert_held(&mvm->mutex);
3039 if (iwl_mvm_has_new_tx_api(mvm))
3043 * The TXQ is marked as reserved only if no traffic came through yet.
3044 * This means no traffic has been sent on this TID (agg'd or not), so
3045 * we no longer have use for the queue. It hasn't even been
3046 * allocated through iwl_mvm_enable_txq, so we can just mark it back as free.
3049 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3050 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3051 tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3055 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3056 struct ieee80211_sta *sta, u16 tid)
3058 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3059 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3064 * If mac80211 is cleaning its state, then say that we finished since
3065 * our state has been cleared anyway.
3067 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3068 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3072 spin_lock_bh(&mvmsta->lock);
3074 txq_id = tid_data->txq_id;
3076 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3077 mvmsta->sta_id, tid, txq_id, tid_data->state);
3079 mvmsta->agg_tids &= ~BIT(tid);
3081 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3083 switch (tid_data->state) {
3085 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3087 IWL_DEBUG_TX_QUEUES(mvm,
3088 "ssn = %d, next_recl = %d\n",
3089 tid_data->ssn, tid_data->next_reclaimed);
3091 tid_data->ssn = 0xffff;
3092 tid_data->state = IWL_AGG_OFF;
3093 spin_unlock_bh(&mvmsta->lock);
3095 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3097 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3099 case IWL_AGG_STARTING:
3100 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3102 * The agg session has been stopped before it was set up. This
3103 * can happen when the AddBA timer times out for example.
3106 /* No barriers since we are under mutex */
3107 lockdep_assert_held(&mvm->mutex);
3109 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3110 tid_data->state = IWL_AGG_OFF;
3115 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3116 mvmsta->sta_id, tid, tid_data->state);
3118 "\ttid_data->txq_id = %d\n", tid_data->txq_id);
3122 spin_unlock_bh(&mvmsta->lock);
3127 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3128 struct ieee80211_sta *sta, u16 tid)
3130 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3131 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3133 enum iwl_mvm_agg_state old_state;
3136 * First set the agg state to OFF to avoid calling
3137 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3139 spin_lock_bh(&mvmsta->lock);
3140 txq_id = tid_data->txq_id;
3141 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3142 mvmsta->sta_id, tid, txq_id, tid_data->state);
3143 old_state = tid_data->state;
3144 tid_data->state = IWL_AGG_OFF;
3145 mvmsta->agg_tids &= ~BIT(tid);
3146 spin_unlock_bh(&mvmsta->lock);
3148 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3150 if (old_state >= IWL_AGG_ON) {
3151 iwl_mvm_drain_sta(mvm, mvmsta, true);
3153 if (iwl_mvm_has_new_tx_api(mvm)) {
3154 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3156 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3157 iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3159 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3160 IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3161 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3164 iwl_mvm_drain_sta(mvm, mvmsta, false);
3166 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3172 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3174 int i, max = -1, max_offs = -1;
3176 lockdep_assert_held(&mvm->mutex);
3178 /* Pick the unused key offset with the highest 'deleted'
3179 * counter. Every time a key is deleted, all the counters
3180 * are incremented and the one that was just deleted is
3181 * reset to zero. Thus, the highest counter is the one
3182 * that was deleted longest ago. Pick that one.
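*
* For example (illustrative counters): if offsets 1 and 3 are both unused,
* with fw_key_deleted[1] == 2 and fw_key_deleted[3] == 7, offset 3 is picked
* since its key was deleted longest ago.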
3184 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3185 if (test_bit(i, mvm->fw_key_table))
3187 if (mvm->fw_key_deleted[i] > max) {
3188 max = mvm->fw_key_deleted[i];
3194 return STA_KEY_IDX_INVALID;
3199 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3200 struct ieee80211_vif *vif,
3201 struct ieee80211_sta *sta)
3203 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3206 return iwl_mvm_sta_from_mac80211(sta);
3209 * The device expects GTKs for station interfaces to be
3210 * installed as GTKs for the AP station. If we have no
3211 * station ID, then use the AP's station ID.
3213 if (vif->type == NL80211_IFTYPE_STATION &&
3214 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3215 u8 sta_id = mvmvif->ap_sta_id;
3217 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3218 lockdep_is_held(&mvm->mutex));
3221 * It is possible that the 'sta' parameter is NULL,
3222 * for example when a GTK is removed - the sta_id will then
3223 * be the AP ID, and no station was passed by mac80211.
3225 if (IS_ERR_OR_NULL(sta))
3228 return iwl_mvm_sta_from_mac80211(sta);
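/*
* Descriptive note (illustrative values): the comparison below walks the
* packet numbers byte by byte starting from the highest index. With
* len == 2, pn1 = {0x01, 0x02} and pn2 = {0x05, 0x01}, index 1 is compared
* first and 0x02 > 0x01, so pn1 is reported as the larger PN without
* index 0 ever being examined.
*/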
3234 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3238 for (i = len - 1; i >= 0; i--) {
3239 if (pn1[i] > pn2[i])
3241 if (pn1[i] < pn2[i])
3248 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3250 struct ieee80211_key_conf *key, bool mcast,
3251 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3252 u8 key_offset, bool mfp)
3255 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3256 struct iwl_mvm_add_sta_key_cmd cmd;
3264 bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3265 IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3266 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3269 if (sta_id == IWL_MVM_INVALID_STA)
3272 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3273 STA_KEY_FLG_KEYID_MSK;
3274 key_flags = cpu_to_le16(keyidx);
3275 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3277 switch (key->cipher) {
3278 case WLAN_CIPHER_SUITE_TKIP:
3279 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3281 memcpy((void *)&u.cmd.tx_mic_key,
3282 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3285 memcpy((void *)&u.cmd.rx_mic_key,
3286 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3288 pn = atomic64_read(&key->tx_pn);
3291 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3292 for (i = 0; i < 5; i++)
3293 u.cmd_v1.tkip_rx_ttak[i] =
3294 cpu_to_le16(tkip_p1k[i]);
3296 memcpy(u.cmd.common.key, key->key, key->keylen);
3298 case WLAN_CIPHER_SUITE_CCMP:
3299 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3300 memcpy(u.cmd.common.key, key->key, key->keylen);
3302 pn = atomic64_read(&key->tx_pn);
3304 case WLAN_CIPHER_SUITE_WEP104:
3305 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3307 case WLAN_CIPHER_SUITE_WEP40:
3308 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3309 memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3311 case WLAN_CIPHER_SUITE_GCMP_256:
3312 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3314 case WLAN_CIPHER_SUITE_GCMP:
3315 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3316 memcpy(u.cmd.common.key, key->key, key->keylen);
3318 pn = atomic64_read(&key->tx_pn);
3321 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3322 memcpy(u.cmd.common.key, key->key, key->keylen);
3326 key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3328 key_flags |= cpu_to_le16(STA_KEY_MFP);
3330 u.cmd.common.key_offset = key_offset;
3331 u.cmd.common.key_flags = key_flags;
3332 u.cmd.common.sta_id = sta_id;
3334 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3339 for (; i < IEEE80211_NUM_TIDS; i++) {
3340 struct ieee80211_key_seq seq = {};
3341 u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3343 /* there's a hole at 2/3 in FW format depending on version */
3344 int hole = api_ver >= 3 ? 0 : 2;
3346 ieee80211_get_key_rx_seq(key, i, &seq);
3348 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3349 rx_pn[0] = seq.tkip.iv16;
3350 rx_pn[1] = seq.tkip.iv16 >> 8;
3351 rx_pn[2 + hole] = seq.tkip.iv32;
3352 rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3353 rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3354 rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3355 } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3357 rx_pn_len = seq.hw.seq_len;
3359 rx_pn[0] = seq.ccmp.pn[0];
3360 rx_pn[1] = seq.ccmp.pn[1];
3361 rx_pn[2 + hole] = seq.ccmp.pn[2];
3362 rx_pn[3 + hole] = seq.ccmp.pn[3];
3363 rx_pn[4 + hole] = seq.ccmp.pn[4];
3364 rx_pn[5 + hole] = seq.ccmp.pn[5];
3367 if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3369 memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3374 u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3375 size = sizeof(u.cmd);
3377 size = sizeof(u.cmd_v1);
3380 status = ADD_STA_SUCCESS;
3381 if (cmd_flags & CMD_ASYNC)
3382 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3385 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3389 case ADD_STA_SUCCESS:
3390 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3394 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3401 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3402 struct ieee80211_key_conf *keyconf,
3403 u8 sta_id, bool remove_key)
3405 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3407 /* verify the key details match the required command's expectations */
3408 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3409 (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3410 keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3411 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3412 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3413 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3416 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3417 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3420 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3421 igtk_cmd.sta_id = cpu_to_le32(sta_id);
3424 /* This is a valid situation for IGTK */
3425 if (sta_id == IWL_MVM_INVALID_STA)
3428 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3430 struct ieee80211_key_seq seq;
3433 switch (keyconf->cipher) {
3434 case WLAN_CIPHER_SUITE_AES_CMAC:
3435 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3437 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3438 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3439 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3445 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3446 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3447 igtk_cmd.ctrl_flags |=
3448 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3449 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3450 pn = seq.aes_cmac.pn;
3451 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3452 ((u64) pn[4] << 8) |
3453 ((u64) pn[3] << 16) |
3454 ((u64) pn[2] << 24) |
3455 ((u64) pn[1] << 32) |
3456 ((u64) pn[0] << 40));
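/*
* Example of the packing above (illustrative PN, and assuming mac80211's
* convention that pn[0] holds the most significant byte): for PN
* 0xA1B2C3D4E5F6, pn[0..5] = A1 B2 C3 D4 E5 F6, the shifts rebuild the
* numeric value 0xA1B2C3D4E5F6, and cpu_to_le64() then hands it to the
* firmware in little-endian byte order.
*/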
3459 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3460 remove_key ? "removing" : "installing",
3461 keyconf->keyidx >= 6 ? "B" : "",
3462 keyconf->keyidx, igtk_cmd.sta_id);
3464 if (!iwl_mvm_has_new_rx_api(mvm)) {
3465 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3466 .ctrl_flags = igtk_cmd.ctrl_flags,
3467 .key_id = igtk_cmd.key_id,
3468 .sta_id = igtk_cmd.sta_id,
3469 .receive_seq_cnt = igtk_cmd.receive_seq_cnt
3472 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3473 ARRAY_SIZE(igtk_cmd_v1.igtk));
3474 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3475 sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3477 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3478 sizeof(igtk_cmd), &igtk_cmd);
3482 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3483 struct ieee80211_vif *vif,
3484 struct ieee80211_sta *sta)
3486 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3491 if (vif->type == NL80211_IFTYPE_STATION &&
3492 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3493 u8 sta_id = mvmvif->ap_sta_id;
3494 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3495 lockdep_is_held(&mvm->mutex));
3503 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3504 struct ieee80211_vif *vif,
3505 struct ieee80211_sta *sta,
3506 struct ieee80211_key_conf *keyconf,
3511 struct ieee80211_key_seq seq;
3517 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3519 sta_id = mvm_sta->sta_id;
3521 } else if (vif->type == NL80211_IFTYPE_AP &&
3522 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3523 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3525 sta_id = mvmvif->mcast_sta.sta_id;
3527 IWL_ERR(mvm, "Failed to find station id\n");
3531 if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3532 addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3533 /* get phase 1 key from mac80211 */
3534 ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3535 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3537 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3538 seq.tkip.iv32, p1k, 0, key_offset,
3542 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3543 0, NULL, 0, key_offset, mfp);
3546 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3547 struct ieee80211_vif *vif,
3548 struct ieee80211_sta *sta,
3549 struct ieee80211_key_conf *keyconf,
3552 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3553 struct iwl_mvm_sta *mvm_sta;
3554 u8 sta_id = IWL_MVM_INVALID_STA;
3556 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3558 lockdep_assert_held(&mvm->mutex);
3560 if (vif->type != NL80211_IFTYPE_AP ||
3561 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3562 /* Get the station id from the mvm local station table */
3563 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3565 IWL_ERR(mvm, "Failed to find station\n");
3568 sta_id = mvm_sta->sta_id;
3571 * It is possible that the 'sta' parameter is NULL, and thus
3572 * there is a need to retrieve the sta from the local station table.
3576 sta = rcu_dereference_protected(
3577 mvm->fw_id_to_mac_id[sta_id],
3578 lockdep_is_held(&mvm->mutex));
3579 if (IS_ERR_OR_NULL(sta)) {
3580 IWL_ERR(mvm, "Invalid station id\n");
3585 if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3588 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3590 sta_id = mvmvif->mcast_sta.sta_id;
3593 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3594 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3595 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3596 ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3600 /* If the key_offset is not pre-assigned, we need to find a
3601 * new offset to use. In normal cases, the offset is not
3602 * pre-assigned, but during HW_RESTART we want to reuse the
3603 * same indices, so we pass them when this function is called.
3605 * In D3 entry, we need to hardcode the indices (because the
3606 * firmware hardcodes the PTK offset to 0). In this case, we
3607 * need to make sure we don't overwrite the hw_key_idx in the
3608 * keyconf structure, because otherwise we cannot configure
3609 * the original ones back when resuming.
3611 if (key_offset == STA_KEY_IDX_INVALID) {
3612 key_offset = iwl_mvm_set_fw_key_idx(mvm);
3613 if (key_offset == STA_KEY_IDX_INVALID)
3615 keyconf->hw_key_idx = key_offset;
3618 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3623 * For WEP, the same key is used for multicast and unicast. Upload it
3624 * again, using the same key offset, and now pointing the other one
3625 * to the same key slot (offset).
3626 * If this fails, remove the original as well.
3628 if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3629 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3631 ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3632 key_offset, !mcast);
3634 __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3639 __set_bit(key_offset, mvm->fw_key_table);
3642 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3643 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3644 sta ? sta->addr : zero_addr, ret);
3648 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3649 struct ieee80211_vif *vif,
3650 struct ieee80211_sta *sta,
3651 struct ieee80211_key_conf *keyconf)
3653 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3654 struct iwl_mvm_sta *mvm_sta;
3655 u8 sta_id = IWL_MVM_INVALID_STA;
3658 lockdep_assert_held(&mvm->mutex);
3660 /* Get the station from the mvm local station table */
3661 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3663 sta_id = mvm_sta->sta_id;
3664 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3665 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3668 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3669 keyconf->keyidx, sta_id);
3671 if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3672 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3673 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3674 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3676 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3677 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3678 keyconf->hw_key_idx);
3682 /* track which key was deleted last */
3683 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3684 if (mvm->fw_key_deleted[i] < U8_MAX)
3685 mvm->fw_key_deleted[i]++;
3687 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3689 if (sta && !mvm_sta) {
3690 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3694 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3698 /* delete WEP key twice to get rid of (now useless) offset */
3699 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3700 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3701 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3706 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3707 struct ieee80211_vif *vif,
3708 struct ieee80211_key_conf *keyconf,
3709 struct ieee80211_sta *sta, u32 iv32,
3712 struct iwl_mvm_sta *mvm_sta;
3713 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3714 bool mfp = sta ? sta->mfp : false;
3718 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3719 if (WARN_ON_ONCE(!mvm_sta))
3721 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3722 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3729 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3730 struct ieee80211_sta *sta)
3732 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3733 struct iwl_mvm_add_sta_cmd cmd = {
3734 .add_modify = STA_MODE_MODIFY,
3735 .sta_id = mvmsta->sta_id,
3736 .station_flags_msk = cpu_to_le32(STA_FLG_PS),
3737 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3741 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3742 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3744 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3747 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3748 struct ieee80211_sta *sta,
3749 enum ieee80211_frame_release_type reason,
3750 u16 cnt, u16 tids, bool more_data,
3751 bool single_sta_queue)
3753 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3754 struct iwl_mvm_add_sta_cmd cmd = {
3755 .add_modify = STA_MODE_MODIFY,
3756 .sta_id = mvmsta->sta_id,
3757 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3758 .sleep_tx_count = cpu_to_le16(cnt),
3759 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3762 unsigned long _tids = tids;
3764 /* convert TIDs to ACs - we don't support TSPEC so that's OK
3765 * Note that this field is reserved and unused by firmware not
3766 * supporting GO uAPSD, so it's safe to always do this.
3768 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3769 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
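/*
* For example (assuming the usual 802.11 TID-to-AC mapping, where TIDs 0/3
* are best effort and TIDs 6/7 are voice): tids containing BIT(0) | BIT(6)
* would set the best-effort and voice bits in cmd.awake_acs.
*/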
3771 /* If we're releasing frames from aggregation or dqa queues then check
3772 * if all the queues that we're releasing frames from, combined, have:
3773 * - more frames than the service period, in which case more_data needs to be set
3775 * - fewer than 'cnt' frames, in which case we need to adjust the
3776 * firmware command (but do that unconditionally)
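*
* Worked example (illustrative numbers): with cnt == 4 and two TIDs holding
* 1 and 2 queued frames respectively, remaining goes 4 -> 3 -> 1, so the
* command's sleep_tx_count becomes 4 - 1 == 3.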
3778 if (single_sta_queue) {
3779 int remaining = cnt;
3782 spin_lock_bh(&mvmsta->lock);
3783 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3784 struct iwl_mvm_tid_data *tid_data;
3787 tid_data = &mvmsta->tid_data[tid];
3789 n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3790 if (n_queued > remaining) {
3795 remaining -= n_queued;
3797 sleep_tx_count = cnt - remaining;
3798 if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3799 mvmsta->sleep_tx_count = sleep_tx_count;
3800 spin_unlock_bh(&mvmsta->lock);
3802 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3803 if (WARN_ON(cnt - remaining == 0)) {
3804 ieee80211_sta_eosp(sta);
3809 /* Note: this is ignored by firmware not supporting GO uAPSD */
3811 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3813 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3814 mvmsta->next_status_eosp = true;
3815 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3817 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3820 /* block the Tx queues until the FW has updated the sleep Tx count */
3821 iwl_trans_block_txq_ptrs(mvm->trans, true);
3823 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3824 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3825 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3827 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3830 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3831 struct iwl_rx_cmd_buffer *rxb)
3833 struct iwl_rx_packet *pkt = rxb_addr(rxb);
3834 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3835 struct ieee80211_sta *sta;
3836 u32 sta_id = le32_to_cpu(notif->sta_id);
3838 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3842 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3843 if (!IS_ERR_OR_NULL(sta))
3844 ieee80211_sta_eosp(sta);
3848 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3849 struct iwl_mvm_sta *mvmsta, bool disable)
3851 struct iwl_mvm_add_sta_cmd cmd = {
3852 .add_modify = STA_MODE_MODIFY,
3853 .sta_id = mvmsta->sta_id,
3854 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3855 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3856 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3860 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3861 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3863 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3866 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
3867 struct ieee80211_sta *sta,
3870 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3872 spin_lock_bh(&mvm_sta->lock);
3874 if (mvm_sta->disable_tx == disable) {
3875 spin_unlock_bh(&mvm_sta->lock);
3879 mvm_sta->disable_tx = disable;
3882 * If sta PS state is handled by mac80211, tell it to start/stop
3883 * queuing tx for this station.
3885 if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
3886 ieee80211_sta_block_awake(mvm->hw, sta, disable);
3888 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
3890 spin_unlock_bh(&mvm_sta->lock);
3893 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
3894 struct iwl_mvm_vif *mvmvif,
3895 struct iwl_mvm_int_sta *sta,
3898 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
3899 struct iwl_mvm_add_sta_cmd cmd = {
3900 .add_modify = STA_MODE_MODIFY,
3901 .sta_id = sta->sta_id,
3902 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3903 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3904 .mac_id_n_color = cpu_to_le32(id),
3908 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3909 iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3911 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3914 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
3915 struct iwl_mvm_vif *mvmvif,
3918 struct ieee80211_sta *sta;
3919 struct iwl_mvm_sta *mvm_sta;
3924 /* Block/unblock all the stations of the given mvmvif */
3925 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
3926 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
3927 if (IS_ERR_OR_NULL(sta))
3930 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3931 if (mvm_sta->mac_id_n_color !=
3932 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
3935 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
3940 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
3943 /* Also need to block/unblock the multicast station */
3944 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
3945 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3946 &mvmvif->mcast_sta, disable);
3949 * Only unblock the broadcast station (FW blocks it for immediate
3950 * quiet, not the driver)
3952 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
3953 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
3954 &mvmvif->bcast_sta, disable);
3957 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
3959 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3960 struct iwl_mvm_sta *mvmsta;
3964 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
3967 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
3972 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
3974 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3977 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
3978 * to align the wrap around of ssn so we compare relevant values.
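* E.g. (illustrative values): with sn == 100 and next_reclaimed == 95,
* ieee80211_sn_sub() yields 5, i.e. five frames are still queued on this TID.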
3980 if (mvm->trans->trans_cfg->gen2)
3983 return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
3986 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3987 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
3988 u8 *key, u32 key_len)
3992 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3993 struct ieee80211_key_conf *keyconf;
3995 ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
3996 NL80211_IFTYPE_UNSPECIFIED,
4001 ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4003 IWL_MVM_TX_FIFO_BE);
4007 keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4013 keyconf->cipher = cipher;
4014 memcpy(keyconf->key, key, key_len);
4015 keyconf->keylen = key_len;
4017 ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4018 0, NULL, 0, 0, true);
4022 iwl_mvm_dealloc_int_sta(mvm, sta);
4026 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4027 struct ieee80211_vif *vif,
4030 struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4031 .mac_id = cpu_to_le32(mac_id),
4035 ret = iwl_mvm_send_cmd_pdu(mvm,
4036 WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4038 sizeof(cancel_channel_switch_cmd),
4039 &cancel_channel_switch_cmd);
4041 IWL_ERR(mvm, "Failed to cancel the channel switch\n");