1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2022-2023 Intel Corporation
/*
 * Build a bitmask of FW station IDs used by @sta: for a non-MLD STA the
 * single deflink sta_id bit; for an MLD STA, the OR of every active link's
 * sta_id, optionally restricted to one link via the filter link id.
 * NOTE(review): this is an elided fragment — parameter list tail, locals
 * and closing braces are not visible here.
 */
9 u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
12 struct iwl_mvm_sta *mvmsta;
19 mvmsta = iwl_mvm_sta_from_mac80211(sta);
21 /* it's easy when the STA is not an MLD */
22 if (!sta->valid_links)
23 return BIT(mvmsta->deflink.sta_id);
25 /* but if it is an MLD, get the mask of all the FW STAs it has ... */
26 for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
27 struct iwl_mvm_link_sta *link_sta;
29 /* unless we have a specific link in mind */
30 if (filter_link_id >= 0 && link_id != filter_link_id)
/* link pointers are RCU-managed; valid under RCU or the mvm mutex */
34 rcu_dereference_check(mvmsta->link[link_id],
35 lockdep_is_held(&mvm->mutex));
39 result |= BIT(link_sta->sta_id);
/* Send the wide-group STA_CONFIG_CMD to the FW; log the status on failure. */
45 static int iwl_mvm_mld_send_sta_cmd(struct iwl_mvm *mvm,
46 struct iwl_mvm_sta_cfg_cmd *cmd)
48 int ret = iwl_mvm_send_cmd_pdu(mvm,
49 WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD),
50 0, sizeof(*cmd), cmd);
52 IWL_ERR(mvm, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret);
57 * Add an internal station to the FW table
/* Builds a STA_CONFIG_CMD for a driver-internal STA (no mac80211 station). */
59 static int iwl_mvm_mld_add_int_sta_to_fw(struct iwl_mvm *mvm,
60 struct iwl_mvm_int_sta *sta,
61 const u8 *addr, int link_id)
63 struct iwl_mvm_sta_cfg_cmd cmd;
65 lockdep_assert_held(&mvm->mutex);
67 memset(&cmd, 0, sizeof(cmd));
68 cmd.sta_id = cpu_to_le32((u8)sta->sta_id);
70 cmd.link_id = cpu_to_le32(link_id);
72 cmd.station_type = cpu_to_le32(sta->type);
/* FW with explicit MFP support wants mgmt protection on bcast/mgmt STAs */
74 if (fw_has_capa(&mvm->fw->ucode_capa,
75 IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) &&
76 sta->type == STATION_TYPE_BCAST_MGMT)
77 cmd.mfp = cpu_to_le32(1);
/* internal STAs use the same address at MLD and link level */
80 memcpy(cmd.peer_mld_address, addr, ETH_ALEN);
81 memcpy(cmd.peer_link_address, addr, ETH_ALEN);
84 return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
88 * Remove a station from the FW table. Before sending the command to remove
89 * the station validate that the station is indeed known to the driver (sanity
/* Sends STA_REMOVE_CMD; logs errors for unknown IDs and send failures. */
92 static int iwl_mvm_mld_rm_sta_from_fw(struct iwl_mvm *mvm, u32 sta_id)
94 struct iwl_mvm_remove_sta_cmd rm_sta_cmd = {
95 .sta_id = cpu_to_le32(sta_id),
99 /* Note: internal stations are marked as error values */
100 if (!rcu_access_pointer(mvm->fw_id_to_mac_id[sta_id])) {
101 IWL_ERR(mvm, "Invalid station id %d\n", sta_id);
105 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD),
106 0, sizeof(rm_sta_cmd), &rm_sta_cmd);
108 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
/* Add the AUX station to the FW on the given LMAC via AUX_STA_CMD. */
115 static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm,
116 struct iwl_mvm_int_sta *sta,
121 struct iwl_mvm_aux_sta_cmd cmd = {
122 .sta_id = cpu_to_le32(sta->sta_id),
123 .lmac_id = cpu_to_le32(lmac_id),
126 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
127 0, sizeof(cmd), &cmd);
129 IWL_ERR(mvm, "Failed to send AUX_STA_CMD\n");
134 * Adds an internal sta to the FW table with its queues
136 int iwl_mvm_mld_add_int_sta_with_queue(struct iwl_mvm *mvm,
137 struct iwl_mvm_int_sta *sta,
138 const u8 *addr, int link_id,
140 unsigned int *_wdg_timeout)
/* a NULL watchdog-timeout pointer means "use the transport default" */
143 unsigned int wdg_timeout = _wdg_timeout ? *_wdg_timeout :
144 mvm->trans->trans_cfg->base_params->wd_timeout;
146 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
/* AUX stations need a dedicated command; all others go via STA_CONFIG */
149 if (sta->type == STATION_TYPE_AUX)
150 ret = iwl_mvm_add_aux_sta_to_fw(mvm, sta, link_id);
152 ret = iwl_mvm_mld_add_int_sta_to_fw(mvm, sta, addr, link_id);
157 * For 22000 firmware and on we cannot add queue to a station unknown
158 * to firmware so enable queue here - after the station was added
160 txq = iwl_mvm_tvqm_enable_txq(mvm, NULL, sta->sta_id, tid,
/* roll back the FW-side station if the queue could not be enabled */
163 iwl_mvm_mld_rm_sta_from_fw(mvm, sta->sta_id);
172 * Adds a new int sta: allocate it in the driver, add it to the FW table,
173 * and add its queues.
175 static int iwl_mvm_mld_add_int_sta(struct iwl_mvm *mvm,
176 struct iwl_mvm_int_sta *int_sta, u16 *queue,
177 enum nl80211_iftype iftype,
178 enum iwl_fw_sta_type sta_type,
179 int link_id, const u8 *addr, u8 tid,
180 unsigned int *wdg_timeout)
184 lockdep_assert_held(&mvm->mutex);
186 /* qmask argument is not used in the new tx api, send a don't care */
187 ret = iwl_mvm_allocate_int_sta(mvm, int_sta, 0, iftype,
192 ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, int_sta, addr, link_id,
193 queue, tid, wdg_timeout);
/* undo the driver-side allocation if the FW add failed */
195 iwl_mvm_dealloc_int_sta(mvm, int_sta);
202 /* Allocate a new station entry for the broadcast station to the given vif,
203 * and send it to the FW.
204 * Note that each P2P mac should have its own broadcast station.
206 int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
207 struct ieee80211_bss_conf *link_conf)
209 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
210 struct iwl_mvm_vif_link_info *mvm_link =
211 mvmvif->link[link_conf->link_id];
212 struct iwl_mvm_int_sta *bsta = &mvm_link->bcast_sta;
/* default peer address is the broadcast address */
213 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
214 const u8 *baddr = _baddr;
215 unsigned int wdg_timeout =
216 iwl_mvm_get_wd_timeout(mvm, vif, false, false);
219 lockdep_assert_held(&mvm->mutex);
/* in IBSS, frames go to the BSSID rather than the broadcast address */
221 if (vif->type == NL80211_IFTYPE_ADHOC)
222 baddr = link_conf->bssid;
/* pick the TXQ matching the interface type */
224 if (vif->type == NL80211_IFTYPE_AP ||
225 vif->type == NL80211_IFTYPE_ADHOC) {
226 queue = &mvm_link->mgmt_queue;
227 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
228 queue = &mvm->p2p_dev_queue;
230 WARN(1, "Missing required TXQ for adding bcast STA\n");
234 return iwl_mvm_mld_add_int_sta(mvm, bsta, queue,
235 ieee80211_vif_type_p2p(vif),
236 STATION_TYPE_BCAST_MGMT,
237 mvm_link->fw_link_id, baddr,
238 IWL_MAX_TID_COUNT, &wdg_timeout);
241 /* Allocate a new station entry for the broadcast station to the given vif,
242 * and send it to the FW.
243 * Note that each AP/GO mac should have its own multicast station.
245 int iwl_mvm_mld_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
246 struct ieee80211_bss_conf *link_conf)
248 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
249 struct iwl_mvm_vif_link_info *mvm_link =
250 mvmvif->link[link_conf->link_id];
251 struct iwl_mvm_int_sta *msta = &mvm_link->mcast_sta;
/* fixed multicast MAC address used for the cab (content-after-beacon) STA */
252 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
253 const u8 *maddr = _maddr;
254 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
256 lockdep_assert_held(&mvm->mutex);
/* multicast station only makes sense on AP/IBSS interfaces */
258 if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
259 vif->type != NL80211_IFTYPE_ADHOC))
262 /* In IBSS, ieee80211_check_queues() sets the cab_queue to be
263 * invalid, so make sure we use the queue we want.
264 * Note that this is done here as we want to avoid making DQA
265 * changes in mac80211 layer.
267 if (vif->type == NL80211_IFTYPE_ADHOC)
268 mvm_link->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
270 return iwl_mvm_mld_add_int_sta(mvm, msta, &mvm_link->cab_queue,
271 vif->type, STATION_TYPE_MCAST,
272 mvm_link->fw_link_id, maddr, 0,
276 /* Allocate a new station entry for the sniffer station to the given vif,
277 * and send it to the FW.
/* Uses the global sniffer STA/queue (mvm->snif_sta / mvm->snif_queue). */
279 int iwl_mvm_mld_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
280 struct ieee80211_bss_conf *link_conf)
282 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
283 struct iwl_mvm_vif_link_info *mvm_link =
284 mvmvif->link[link_conf->link_id];
286 lockdep_assert_held(&mvm->mutex);
288 return iwl_mvm_mld_add_int_sta(mvm, &mvm->snif_sta, &mvm->snif_queue,
289 vif->type, STATION_TYPE_BCAST_MGMT,
290 mvm_link->fw_link_id, NULL,
291 IWL_MAX_TID_COUNT, NULL);
/* Add the global AUX station; lmac_id rides in the link_id parameter slot. */
294 int iwl_mvm_mld_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
296 lockdep_assert_held(&mvm->mutex);
298 /* In CDB NICs we need to specify which lmac to use for aux activity;
299 * use the link_id argument place to send lmac_id to the function.
301 return iwl_mvm_mld_add_int_sta(mvm, &mvm->aux_sta, &mvm->aux_queue,
302 NL80211_IFTYPE_UNSPECIFIED,
303 STATION_TYPE_AUX, lmac_id, NULL,
304 IWL_MAX_TID_COUNT, NULL);
/*
 * Tear down one TXQ for the stations in @sta_mask: when the FW requires an
 * explicit queue removal, send SCD_QUEUE_CONFIG_CMD(REMOVE) first, then
 * free the transport queue and invalidate the caller's queue index.
 */
307 static int iwl_mvm_mld_disable_txq(struct iwl_mvm *mvm, u32 sta_mask,
308 u16 *queueptr, u8 tid)
310 int queue = *queueptr;
/* IWL_MAX_TID_COUNT is the driver's marker for the mgmt TID */
313 if (tid == IWL_MAX_TID_COUNT)
316 if (mvm->sta_remove_requires_queue_remove) {
317 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
318 SCD_QUEUE_CONFIG_CMD);
319 struct iwl_scd_queue_cfg_cmd remove_cmd = {
320 .operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
321 .u.remove.tid = cpu_to_le32(tid),
322 .u.remove.sta_mask = cpu_to_le32(sta_mask),
325 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
330 iwl_trans_txq_free(mvm->trans, queue);
331 *queueptr = IWL_MVM_INVALID_QUEUE;
336 /* Removes a sta from the FW table, disable its queues, and dealloc it
/* Order matters: optional flush, then queue teardown, then FW removal. */
338 static int iwl_mvm_mld_rm_int_sta(struct iwl_mvm *mvm,
339 struct iwl_mvm_int_sta *int_sta,
340 bool flush, u8 tid, u16 *queuptr)
344 lockdep_assert_held(&mvm->mutex);
346 if (WARN_ON_ONCE(int_sta->sta_id == IWL_MVM_INVALID_STA))
350 iwl_mvm_flush_sta(mvm, int_sta->sta_id, int_sta->tfd_queue_msk);
352 iwl_mvm_mld_disable_txq(mvm, BIT(int_sta->sta_id), queuptr, tid);
354 ret = iwl_mvm_mld_rm_sta_from_fw(mvm, int_sta->sta_id);
356 IWL_WARN(mvm, "Failed sending remove station\n");
/* driver-side bookkeeping is released regardless of the FW result */
358 iwl_mvm_dealloc_int_sta(mvm, int_sta);
/* Remove the per-link broadcast STA, picking the queue by interface type. */
363 int iwl_mvm_mld_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
364 struct ieee80211_bss_conf *link_conf)
366 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
367 struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id];
370 lockdep_assert_held(&mvm->mutex);
376 case NL80211_IFTYPE_AP:
377 case NL80211_IFTYPE_ADHOC:
378 queueptr = &link->mgmt_queue;
380 case NL80211_IFTYPE_P2P_DEVICE:
381 queueptr = &mvm->p2p_dev_queue;
384 WARN(1, "Can't free bcast queue on vif type %d\n",
389 return iwl_mvm_mld_rm_int_sta(mvm, &link->bcast_sta,
390 true, IWL_MAX_TID_COUNT, queueptr);
393 /* Send the FW a request to remove the station from it's internal data
394 * structures, and in addition remove it from the local data structure.
396 int iwl_mvm_mld_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
397 struct ieee80211_bss_conf *link_conf)
399 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
400 struct iwl_mvm_vif_link_info *link = mvmvif->link[link_conf->link_id];
402 lockdep_assert_held(&mvm->mutex);
/* multicast traffic is TID 0 on the cab queue */
407 return iwl_mvm_mld_rm_int_sta(mvm, &link->mcast_sta, true, 0,
/* Remove the global sniffer STA and its queue; no flush needed. */
411 int iwl_mvm_mld_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
413 lockdep_assert_held(&mvm->mutex);
415 return iwl_mvm_mld_rm_int_sta(mvm, &mvm->snif_sta, false,
416 IWL_MAX_TID_COUNT, &mvm->snif_queue);
/* Remove the global AUX STA and its queue; no flush needed. */
419 int iwl_mvm_mld_rm_aux_sta(struct iwl_mvm *mvm)
421 lockdep_assert_held(&mvm->mutex);
423 return iwl_mvm_mld_rm_int_sta(mvm, &mvm->aux_sta, false,
424 IWL_MAX_TID_COUNT, &mvm->aux_queue);
427 /* send a cfg sta command to add/update a sta in firmware */
428 static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
429 struct ieee80211_vif *vif,
430 struct ieee80211_link_sta *link_sta,
431 struct ieee80211_bss_conf *link_conf,
432 struct iwl_mvm_link_sta *mvm_link_sta)
434 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
435 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
436 struct iwl_mvm_vif_link_info *link_info =
437 mvm_vif->link[link_conf->link_id];
438 struct iwl_mvm_sta_cfg_cmd cmd = {
439 .sta_id = cpu_to_le32(mvm_link_sta->sta_id),
440 .station_type = cpu_to_le32(mvm_sta->sta_type),
442 u32 agg_size = 0, mpdu_dens = 0;
444 /* when adding sta, link should exist in FW */
445 if (WARN_ON(link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID))
448 cmd.link_id = cpu_to_le32(link_info->fw_link_id);
/* MLD-level vs per-link addresses differ for real MLO peers */
450 memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN);
451 memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN);
/* AID is only meaningful once the STA reached at least ASSOC state */
453 if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
454 cmd.assoc_id = cpu_to_le32(sta->aid);
/* keep MFP on until authorized, or when the peer requires it */
456 if (fw_has_capa(&mvm->fw->ucode_capa,
457 IWL_UCODE_TLV_CAPA_STA_EXP_MFP_SUPPORT) &&
458 (sta->mfp || mvm_sta->sta_state < IEEE80211_STA_AUTHORIZED))
459 cmd.mfp = cpu_to_le32(1);
/* derive MIMO capability from the peer's RX NSS */
461 switch (link_sta->rx_nss) {
463 cmd.mimo = cpu_to_le32(0);
466 cmd.mimo = cpu_to_le32(1);
/* SMPS may override the NSS-derived MIMO settings */
470 switch (sta->deflink.smps_mode) {
471 case IEEE80211_SMPS_AUTOMATIC:
472 case IEEE80211_SMPS_NUM_MODES:
475 case IEEE80211_SMPS_STATIC:
477 cmd.mimo = cpu_to_le32(0);
479 case IEEE80211_SMPS_DYNAMIC:
480 cmd.mimo_protection = cpu_to_le32(1);
482 case IEEE80211_SMPS_OFF:
/* A-MPDU density / max size come from the peer's HT/VHT/HE caps */
487 mpdu_dens = iwl_mvm_get_sta_ampdu_dens(link_sta, link_conf, &agg_size);
488 cmd.tx_ampdu_spacing = cpu_to_le32(mpdu_dens);
489 cmd.tx_ampdu_max_size = cpu_to_le32(agg_size);
493 cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128);
494 cmd.uapsd_acs = cpu_to_le32(iwl_mvm_get_sta_uapsd_acs(sta));
/* HE-only fields: UORA, packet extension, HTC flags, ACK-enabled */
497 if (link_sta->he_cap.has_he) {
499 cpu_to_le32(link_conf->uora_exists ? 1 : 0);
502 iwl_mvm_set_sta_pkt_ext(mvm, link_sta, &cmd.pkt_ext);
505 cmd.htc_flags = iwl_mvm_get_sta_htc_flags(sta, link_sta);
507 if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] &
508 IEEE80211_HE_MAC_CAP2_ACK_EN)
509 cmd.ack_enabled = cpu_to_le32(1);
512 return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
/*
 * Release one link STA's driver references. While the FW still knows the
 * station (is_in_fw) the fw_id_to_mac_id slot keeps an ERR_PTR marker so
 * the ID is not reused; non-default links are freed after a grace period.
 */
515 static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
516 struct iwl_mvm_sta *mvm_sta,
517 struct iwl_mvm_link_sta *mvm_sta_link,
518 unsigned int link_id,
521 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
522 is_in_fw ? ERR_PTR(-EINVAL) : NULL);
523 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[mvm_sta_link->sta_id], NULL);
524 RCU_INIT_POINTER(mvm_sta->link[link_id], NULL);
/* deflink is embedded in mvm_sta and must not be freed separately */
526 if (mvm_sta_link != &mvm_sta->deflink)
527 kfree_rcu(mvm_sta_link, rcu_head);
/* Free every allocated link STA of @mvm_sta (driver side only). */
530 static void iwl_mvm_mld_sta_rm_all_sta_links(struct iwl_mvm *mvm,
531 struct iwl_mvm_sta *mvm_sta)
533 unsigned int link_id;
535 for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
536 struct iwl_mvm_link_sta *link =
537 rcu_dereference_protected(mvm_sta->link[link_id],
538 lockdep_is_held(&mvm->mutex));
543 iwl_mvm_mld_free_sta_link(mvm, mvm_sta, link, link_id, false);
/*
 * Allocate a driver link STA for @link_id: grab a free FW sta_id, reuse
 * the embedded deflink when mac80211's link points at its deflink,
 * otherwise kzalloc a separate entry, then publish the RCU pointers.
 */
547 static int iwl_mvm_mld_alloc_sta_link(struct iwl_mvm *mvm,
548 struct ieee80211_vif *vif,
549 struct ieee80211_sta *sta,
550 unsigned int link_id)
552 struct ieee80211_link_sta *link_sta =
553 link_sta_dereference_protected(sta, link_id);
554 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
555 struct iwl_mvm_link_sta *link;
556 u32 sta_id = iwl_mvm_find_free_sta_id(mvm,
557 ieee80211_vif_type_p2p(vif));
559 if (sta_id == IWL_MVM_INVALID_STA)
562 if (rcu_access_pointer(sta->link[link_id]) == &sta->deflink) {
563 link = &mvm_sta->deflink;
565 link = kzalloc(sizeof(*link), GFP_KERNEL);
570 link->sta_id = sta_id;
571 rcu_assign_pointer(mvm_sta->link[link_id], link);
572 rcu_assign_pointer(mvm->fw_id_to_mac_id[link->sta_id], sta);
573 rcu_assign_pointer(mvm->fw_id_to_link_sta[link->sta_id],
579 /* allocate all the links of a sta, called when the station is first added */
580 static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
581 struct ieee80211_vif *vif,
582 struct ieee80211_sta *sta)
584 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
585 unsigned int link_id;
588 lockdep_assert_held(&mvm->mutex);
/* skip links mac80211 doesn't have, or that we already allocated */
590 for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
591 if (!rcu_access_pointer(sta->link[link_id]) ||
592 mvm_sta->link[link_id])
595 ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
/* error path: tear down whatever was allocated so far */
603 iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta);
/*
 * Record (or clear) the AP's station ID on a client vif link; the WARNs
 * catch double-set / clear-when-unset inconsistencies.
 */
607 static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
608 struct iwl_mvm_vif_link_info *vif_link,
609 struct iwl_mvm_link_sta *sta_link)
612 WARN_ON(vif_link->ap_sta_id != IWL_MVM_INVALID_STA);
613 vif_link->ap_sta_id = sta_link->sta_id;
615 WARN_ON(vif_link->ap_sta_id == IWL_MVM_INVALID_STA);
619 /* FIXME: consider waiting for mac80211 to add the STA instead of allocating
/*
 * After a HW restart, re-send the STA config for every active link and
 * re-publish the fw_id lookup pointers, then reallocate the TX queues.
 */
622 static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
623 struct ieee80211_vif *vif,
624 struct ieee80211_sta *sta)
626 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
627 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
628 struct ieee80211_link_sta *link_sta;
629 unsigned int link_id;
630 /* no active link found */
634 /* First add an empty station since allocating a queue requires
635 * a valid station. Since we need a link_id to allocate a station,
636 * pick up the first valid one.
638 for_each_sta_active_link(vif, sta, link_sta, link_id) {
639 struct iwl_mvm_vif_link_info *mvm_link;
640 struct ieee80211_bss_conf *link_conf =
641 link_conf_dereference_protected(vif, link_id);
642 struct iwl_mvm_link_sta *mvm_link_sta =
643 rcu_dereference_protected(mvm_sta->link[link_id],
644 lockdep_is_held(&mvm->mutex));
649 mvm_link = mvmvif->link[link_conf->link_id];
651 if (!mvm_link || !mvm_link_sta)
654 sta_id = mvm_link_sta->sta_id;
655 ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta,
656 link_conf, mvm_link_sta);
660 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
661 rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id], link_sta);
665 iwl_mvm_realloc_queues_after_restart(mvm, sta);
/*
 * Add a (possibly MLD) mac80211 station: allocate driver link entries,
 * configure each active link in FW, and on any failure unwind both the
 * FW-side additions and the driver-side allocations.
 */
670 int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
671 struct ieee80211_sta *sta)
673 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
674 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
675 unsigned long link_sta_added_to_fw = 0;
676 struct ieee80211_link_sta *link_sta;
678 unsigned int link_id;
680 lockdep_assert_held(&mvm->mutex);
/* fresh add vs re-add during HW restart take different paths */
682 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
683 ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
687 spin_lock_init(&mvm_sta->lock);
689 ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
692 ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
698 /* at this stage sta link pointers are already allocated */
699 ret = iwl_mvm_mld_update_sta(mvm, vif, sta);
703 for_each_sta_active_link(vif, sta, link_sta, link_id) {
704 struct ieee80211_bss_conf *link_conf =
705 link_conf_dereference_protected(vif, link_id);
706 struct iwl_mvm_link_sta *mvm_link_sta =
707 rcu_dereference_protected(mvm_sta->link[link_id],
708 lockdep_is_held(&mvm->mutex));
710 if (WARN_ON(!link_conf || !mvm_link_sta)) {
715 ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
/* track per-link FW additions so the error path can undo them */
720 link_sta_added_to_fw |= BIT(link_id);
722 if (vif->type == NL80211_IFTYPE_STATION)
723 iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id],
730 /* remove all already allocated stations in FW */
731 for_each_set_bit(link_id, &link_sta_added_to_fw,
732 IEEE80211_MLD_MAX_NUM_LINKS) {
733 struct iwl_mvm_link_sta *mvm_link_sta =
734 rcu_dereference_protected(mvm_sta->link[link_id],
735 lockdep_is_held(&mvm->mutex));
737 iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_link_sta->sta_id);
740 /* free all sta resources in the driver */
741 iwl_mvm_mld_sta_rm_all_sta_links(mvm, mvm_sta);
/* Re-send the FW STA config for every active link of @sta. */
745 int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
746 struct ieee80211_sta *sta)
748 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
749 struct ieee80211_link_sta *link_sta;
750 unsigned int link_id;
753 lockdep_assert_held(&mvm->mutex);
755 for_each_sta_active_link(vif, sta, link_sta, link_id) {
756 struct ieee80211_bss_conf *link_conf =
757 link_conf_dereference_protected(vif, link_id);
758 struct iwl_mvm_link_sta *mvm_link_sta =
759 rcu_dereference_protected(mvm_sta->link[link_id],
760 lockdep_is_held(&mvm->mutex));
762 if (WARN_ON(!link_conf || !mvm_link_sta))
765 ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
769 IWL_ERR(mvm, "Failed to update sta link %d\n", link_id);
/*
 * Disable all TID data queues of @sta (using the full FW sta-id mask for
 * all its links) and invalidate the mac80211 TXQ back-pointers.
 */
777 static void iwl_mvm_mld_disable_sta_queues(struct iwl_mvm *mvm,
778 struct ieee80211_vif *vif,
779 struct ieee80211_sta *sta)
781 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
/* -1: no link filter, mask covers every link's FW STA */
782 u32 sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
785 lockdep_assert_held(&mvm->mutex);
787 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
788 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
791 iwl_mvm_mld_disable_txq(mvm, sta_mask,
792 &mvm_sta->tid_data[i].txq_id, i);
793 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
796 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
797 struct iwl_mvm_txq *mvmtxq =
798 iwl_mvm_txq_from_mac80211(sta->txq[i]);
800 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
/*
 * Remove a mac80211 station: flush per-link TIDs, wait for queues to
 * drain, disable the queues, then remove each link STA from FW (unless
 * iwl_mvm_sta_del says it must stay) and free the driver link entries.
 */
804 int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
805 struct ieee80211_sta *sta)
807 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
808 struct ieee80211_link_sta *link_sta;
809 unsigned int link_id;
812 lockdep_assert_held(&mvm->mutex);
814 /* flush its queues here since we are freeing mvm_sta */
815 for_each_sta_active_link(vif, sta, link_sta, link_id) {
816 struct iwl_mvm_link_sta *mvm_link_sta =
817 rcu_dereference_protected(mvm_sta->link[link_id],
818 lockdep_is_held(&mvm->mutex));
820 if (WARN_ON(!mvm_link_sta))
823 ret = iwl_mvm_flush_sta_tids(mvm, mvm_link_sta->sta_id,
829 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
833 iwl_mvm_mld_disable_sta_queues(mvm, vif, sta);
835 for_each_sta_active_link(vif, sta, link_sta, link_id) {
836 struct iwl_mvm_link_sta *mvm_link_sta =
837 rcu_dereference_protected(mvm_sta->link[link_id],
838 lockdep_is_held(&mvm->mutex));
/* stay_in_fw: driver-level delete may require keeping the FW entry */
841 stay_in_fw = iwl_mvm_sta_del(mvm, vif, sta, link_sta, &ret);
846 ret = iwl_mvm_mld_rm_sta_from_fw(mvm,
847 mvm_link_sta->sta_id);
849 iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
850 link_id, stay_in_fw);
/* Remove a station by raw FW id and clear both lookup table entries. */
856 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)
858 int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);
860 lockdep_assert_held(&mvm->mutex);
862 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
863 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
/*
 * Tell the FW to block/unblock TX for a station via STA_DISABLE_TX_CMD.
 * Sent with CMD_ASYNC — callers must not rely on completion.
 */
867 void iwl_mvm_mld_sta_modify_disable_tx(struct iwl_mvm *mvm,
868 struct iwl_mvm_sta *mvmsta,
871 struct iwl_mvm_sta_disable_tx_cmd cmd;
874 cmd.sta_id = cpu_to_le32(mvmsta->deflink.sta_id);
875 cmd.disable = cpu_to_le32(disable);
877 ret = iwl_mvm_send_cmd_pdu(mvm,
878 WIDE_ID(MAC_CONF_GROUP, STA_DISABLE_TX_CMD),
879 CMD_ASYNC, sizeof(cmd), &cmd);
882 "Failed to send STA_DISABLE_TX_CMD command (%d)\n",
/*
 * AP-side wrapper: under the STA lock, skip the FW command when the
 * disable_tx state is already the requested value.
 */
886 void iwl_mvm_mld_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
887 struct ieee80211_sta *sta,
890 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
892 spin_lock_bh(&mvm_sta->lock);
894 if (mvm_sta->disable_tx == disable) {
895 spin_unlock_bh(&mvm_sta->lock);
899 iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable);
901 spin_unlock_bh(&mvm_sta->lock);
/*
 * Block/unblock TX for every station belonging to @mvmvif, matched by the
 * MAC id/color stamped on each station.
 */
904 void iwl_mvm_mld_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
905 struct iwl_mvm_vif *mvmvif,
908 struct ieee80211_sta *sta;
909 struct iwl_mvm_sta *mvm_sta;
914 /* Block/unblock all the stations of the given mvmvif */
915 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
916 sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
/* skip empty slots and internal stations (stored as ERR_PTR) */
917 if (IS_ERR_OR_NULL(sta))
920 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
921 if (mvm_sta->mac_id_n_color !=
922 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
925 iwl_mvm_mld_sta_modify_disable_tx(mvm, mvm_sta, disable);
/*
 * Re-point every in-use TXQ of @sta from @old_sta_mask to @new_sta_mask
 * via SCD_QUEUE_CONFIG_CMD(MODIFY), one command per active TID.
 */
931 static int iwl_mvm_mld_update_sta_queues(struct iwl_mvm *mvm,
932 struct ieee80211_sta *sta,
936 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
937 struct iwl_scd_queue_cfg_cmd cmd = {
938 .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
939 .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
940 .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
942 struct iwl_host_cmd hcmd = {
943 .id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
944 .len[0] = sizeof(cmd),
950 lockdep_assert_held(&mvm->mutex);
952 for (tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
953 struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[tid];
954 int txq_id = tid_data->txq_id;
956 if (txq_id == IWL_MVM_INVALID_QUEUE)
/* driver's IWL_MAX_TID_COUNT slot maps to the FW mgmt TID */
959 if (tid == IWL_MAX_TID_COUNT)
960 cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
962 cmd.u.modify.tid = cpu_to_le32(tid);
964 ret = iwl_mvm_send_cmd(mvm, &hcmd);
/*
 * Update every active RX BAID whose station mask matches @old_sta_mask to
 * use @new_sta_mask, via RX_BAID_ALLOCATION_CONFIG_CMD (MODIFY).
 */
972 static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
976 struct iwl_rx_baid_cfg_cmd cmd = {
977 .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
978 .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
979 .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
981 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
984 BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
986 for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
987 struct iwl_mvm_baid_data *data;
990 data = rcu_dereference_protected(mvm->baid_map[baid],
991 lockdep_is_held(&mvm->mutex));
/* only touch BAIDs that belong to the station being updated */
995 if (!(data->sta_mask & old_sta_mask))
998 WARN_ONCE(data->sta_mask != old_sta_mask,
999 "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
1000 baid, old_sta_mask, data->sta_mask);
1002 cmd.modify.tid = cpu_to_le32(data->tid);
1004 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
1005 data->sta_mask = new_sta_mask;
/*
 * Migrate all FW resources tied to a station mask — TX queues, keys, and
 * RX BAIDs, in that order — from @old_sta_mask to @new_sta_mask.
 */
1013 static int iwl_mvm_mld_update_sta_resources(struct iwl_mvm *mvm,
1014 struct ieee80211_vif *vif,
1015 struct ieee80211_sta *sta,
1021 ret = iwl_mvm_mld_update_sta_queues(mvm, sta,
1027 ret = iwl_mvm_mld_update_sta_keys(mvm, vif, sta,
1033 return iwl_mvm_mld_update_sta_baids(mvm, old_sta_mask, new_sta_mask);
1036 int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
1037 struct ieee80211_vif *vif,
1038 struct ieee80211_sta *sta,
1039 u16 old_links, u16 new_links)
1041 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1042 struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
1043 struct iwl_mvm_link_sta *mvm_sta_link;
1044 struct iwl_mvm_vif_link_info *mvm_vif_link;
1045 unsigned long links_to_add = ~old_links & new_links;
1046 unsigned long links_to_rem = old_links & ~new_links;
1047 unsigned long old_links_long = old_links;
1048 u32 current_sta_mask = 0, sta_mask_added = 0, sta_mask_to_rem = 0;
1049 unsigned long link_sta_added_to_fw = 0, link_sta_allocated = 0;
1050 unsigned int link_id;
1053 lockdep_assert_held(&mvm->mutex);
1055 for_each_set_bit(link_id, &old_links_long,
1056 IEEE80211_MLD_MAX_NUM_LINKS) {
1058 rcu_dereference_protected(mvm_sta->link[link_id],
1059 lockdep_is_held(&mvm->mutex));
1061 if (WARN_ON(!mvm_sta_link)) {
1066 current_sta_mask |= BIT(mvm_sta_link->sta_id);
1067 if (links_to_rem & BIT(link_id))
1068 sta_mask_to_rem |= BIT(mvm_sta_link->sta_id);
1071 if (sta_mask_to_rem) {
1072 ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
1079 current_sta_mask &= ~sta_mask_to_rem;
1082 for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) {
1084 rcu_dereference_protected(mvm_sta->link[link_id],
1085 lockdep_is_held(&mvm->mutex));
1086 mvm_vif_link = mvm_vif->link[link_id];
1088 if (WARN_ON(!mvm_sta_link || !mvm_vif_link)) {
1093 ret = iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
1097 if (vif->type == NL80211_IFTYPE_STATION)
1098 mvm_vif_link->ap_sta_id = IWL_MVM_INVALID_STA;
1100 iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,
1104 for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
1105 struct ieee80211_bss_conf *link_conf =
1106 link_conf_dereference_protected(vif, link_id);
1107 struct ieee80211_link_sta *link_sta =
1108 link_sta_dereference_protected(sta, link_id);
1109 mvm_vif_link = mvm_vif->link[link_id];
1111 if (WARN_ON(!mvm_vif_link || !link_conf || !link_sta)) {
1116 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1117 if (WARN_ON(!mvm_sta->link[link_id])) {
1122 if (WARN_ON(mvm_sta->link[link_id])) {
1126 ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta,
1132 link_sta->agg.max_rc_amsdu_len = 1;
1133 ieee80211_sta_recalc_aggregates(sta);
1136 rcu_dereference_protected(mvm_sta->link[link_id],
1137 lockdep_is_held(&mvm->mutex));
1139 if (WARN_ON(!mvm_sta_link)) {
1144 if (vif->type == NL80211_IFTYPE_STATION)
1145 iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif_link,
1148 link_sta_allocated |= BIT(link_id);
1150 sta_mask_added |= BIT(mvm_sta_link->sta_id);
1152 ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
1157 link_sta_added_to_fw |= BIT(link_id);
1159 iwl_mvm_rs_add_sta_link(mvm, mvm_sta_link);
1162 if (sta_mask_added) {
1163 ret = iwl_mvm_mld_update_sta_resources(mvm, vif, sta,
1174 /* remove all already allocated stations in FW */
1175 for_each_set_bit(link_id, &link_sta_added_to_fw,
1176 IEEE80211_MLD_MAX_NUM_LINKS) {
1178 rcu_dereference_protected(mvm_sta->link[link_id],
1179 lockdep_is_held(&mvm->mutex));
1181 iwl_mvm_mld_rm_sta_from_fw(mvm, mvm_sta_link->sta_id);
1184 /* remove all already allocated station links in driver */
1185 for_each_set_bit(link_id, &link_sta_allocated,
1186 IEEE80211_MLD_MAX_NUM_LINKS) {
1188 rcu_dereference_protected(mvm_sta->link[link_id],
1189 lockdep_is_held(&mvm->mutex));
1191 iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_sta_link, link_id,