1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
7 #include <linux/module.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/vmalloc.h>
10 #include <net/mac80211.h>
12 #include "fw/notif-wait.h"
13 #include "iwl-trans.h"
14 #include "iwl-op-mode.h"
16 #include "iwl-debug.h"
18 #include "iwl-modparams.h"
20 #include "iwl-phy-db.h"
21 #include "iwl-eeprom-parse.h"
26 #include "fw/api/scan.h"
27 #include "fw/api/rfi.h"
28 #include "time-event.h"
32 #include "time-sync.h"
34 #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
35 MODULE_DESCRIPTION(DRV_DESCRIPTION);
36 MODULE_LICENSE("GPL");
37 MODULE_IMPORT_NS(IWLWIFI);
39 static const struct iwl_op_mode_ops iwl_mvm_ops;
40 static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
42 struct iwl_mvm_mod_params iwlmvm_mod_params = {
43 .power_scheme = IWL_POWER_SCHEME_BPS,
44 /* rest of fields are 0 by default */
47 module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
48 MODULE_PARM_DESC(init_dbg,
49 "set to true to debug an ASSERT in INIT fw (default: false");
50 module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
51 MODULE_PARM_DESC(power_scheme,
52 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
55 * module init and exit functions
57 static int __init iwl_mvm_init(void)
61 ret = iwl_mvm_rate_control_register();
63 pr_err("Unable to register rate control algorithm: %d\n", ret);
67 ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
69 pr_err("Unable to register MVM op_mode: %d\n", ret);
73 module_init(iwl_mvm_init);
75 static void __exit iwl_mvm_exit(void)
77 iwl_opmode_deregister("iwlmvm");
78 iwl_mvm_rate_control_unregister();
80 module_exit(iwl_mvm_exit);
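/*
 * Program the radio configuration (PHY type/step/dash taken from the
 * firmware's PHY config) and a few workaround bits into
 * CSR_HW_IF_CONFIG_REG; hooked up as the .nic_config op-mode callback below.
 */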
82 static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
84 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
85 u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
87 u32 phy_config = iwl_mvm_get_phy_config(mvm);
89 radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
90 FW_PHY_CFG_RADIO_TYPE_POS;
91 radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
92 FW_PHY_CFG_RADIO_STEP_POS;
93 radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
94 FW_PHY_CFG_RADIO_DASH_POS;
96 IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
97 radio_cfg_step, radio_cfg_dash);
99 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
103 reg_val = CSR_HW_REV_STEP_DASH(mvm->trans->hw_rev);
105 /* radio configuration */
106 reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
107 reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
108 reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
110 WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
111 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
114 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
115 * sampling, and shouldn't be set to any non-zero value.
116 * The same is supposed to be true of the other HW families, but unsetting
117 * these bits on some devices (such as the 7260) causes automatic tests to
118 * fail on seemingly unrelated errors. Need to further investigate this, but for now
119 * we'll separate cases.
121 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
122 reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
124 if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
125 reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
127 iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
128 CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP_DASH |
129 CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
130 CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
131 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
132 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
133 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI |
134 CSR_HW_IF_CONFIG_REG_D3_DEBUG,
138 * W/A: NIC is stuck in a reset state after Early PCIe power off
139 * (PCIe power is lost before PERST# is asserted), causing ME FW
140 * to lose ownership and be unable to obtain it back.
142 if (!mvm->trans->cfg->apmg_not_supported)
143 iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
144 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
145 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
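/*
 * Extended-CCA workaround: when the firmware reports EXT_CCA problems on a
 * 2.4 GHz 40 MHz station link, strip 40 MHz support from the shared HT/HE
 * capabilities and force a disconnect so the next association uses 20 MHz.
 */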
148 static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
149 struct iwl_rx_cmd_buffer *rxb)
151 struct iwl_rx_packet *pkt = rxb_addr(rxb);
152 struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
153 struct ieee80211_supported_band *sband;
154 const struct ieee80211_sta_he_cap *he_cap;
155 struct ieee80211_vif *vif;
157 if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
160 vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
161 if (!vif || vif->type != NL80211_IFTYPE_STATION)
164 if (!vif->bss_conf.chanreq.oper.chan ||
165 vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
166 vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
172 /* this shouldn't happen *again*, ignore it */
173 if (mvm->cca_40mhz_workaround)
177 * We'll decrement this on disconnect - so set to 2 since we'll
178 * still have to disconnect from the current AP first.
180 mvm->cca_40mhz_workaround = 2;
183 * This capability manipulation isn't really ideal, but it's the
184 * easiest choice - otherwise we'd have to do some major changes
185 * in mac80211 to support this, which isn't worth it. This does
186 * mean that userspace may have outdated information, but that's
187 * actually not an issue at all.
189 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
191 WARN_ON(!sband->ht_cap.ht_supported);
192 WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
193 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
195 he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
198 /* we know that ours is writable */
199 struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap;
201 WARN_ON(!he->has_he);
202 WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
203 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
204 he->he_cap_elem.phy_cap_info[0] &=
205 ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
208 ieee80211_disconnect(vif, true);
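/*
 * Apply the firmware's static SMPS request to one link: it only applies to
 * HE links on 160 MHz channels, otherwise SMPS stays automatic.
 */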
211 void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
212 struct ieee80211_bss_conf *link_conf)
214 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
215 struct iwl_mvm *mvm = mvmvif->mvm;
216 enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;
221 if (mvm->fw_static_smps_request &&
222 link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
223 link_conf->he_support)
224 mode = IEEE80211_SMPS_STATIC;
226 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode,
230 static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
231 struct ieee80211_vif *vif)
233 struct ieee80211_bss_conf *link_conf;
234 unsigned int link_id;
238 for_each_vif_active_link(vif, link_conf, link_id)
239 iwl_mvm_update_link_smps(vif, link_conf);
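/*
 * The thermal manager in the firmware asked to enable or disable the second
 * RX chain; remember the request and re-evaluate SMPS on all active links
 * of all interfaces.
 */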
244 static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
245 struct iwl_rx_cmd_buffer *rxb)
247 struct iwl_rx_packet *pkt = rxb_addr(rxb);
248 struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
251 * We could pass it to the iterator data, but also need to remember
252 * it for new interfaces that are added while in this state.
254 mvm->fw_static_smps_request =
255 req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
256 ieee80211_iterate_interfaces(mvm->hw,
257 IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
258 iwl_mvm_intf_dual_chain_req, NULL);
262 * enum iwl_rx_handler_context: context for Rx handler
263 * @RX_HANDLER_SYNC: this means that it will be called in the Rx path
264 * which can't acquire mvm->mutex.
265 * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
266 * (and only in this case!), it should be set as ASYNC. In that case,
267 * it will be called from a worker with mvm->mutex held.
268 * @RX_HANDLER_ASYNC_UNLOCKED: in case the handler needs to lock the
269 * mutex itself, it will be called from a worker without mvm->mutex held.
270 * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
271 * and mvm->mutex. Will be handled with the wiphy_work queue infra
272 * instead of regular work queue.
274 enum iwl_rx_handler_context {
276 RX_HANDLER_ASYNC_LOCKED,
277 RX_HANDLER_ASYNC_UNLOCKED,
278 RX_HANDLER_ASYNC_LOCKED_WIPHY,
282 * struct iwl_rx_handlers: handler for FW notification
283 * @cmd_id: command id
284 * @min_size: minimum size to expect for the notification
285 * @context: see &iwl_rx_handler_context
286 * @fn: the function called when the notification is received
288 struct iwl_rx_handlers {
289 u16 cmd_id, min_size;
290 enum iwl_rx_handler_context context;
291 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
294 #define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context) \
295 { .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
296 #define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context) \
297 { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
298 #define RX_HANDLER(_cmd_id, _fn, _context, _struct) \
299 { .cmd_id = _cmd_id, .fn = _fn, \
300 .context = _context, .min_size = sizeof(_struct), }
301 #define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct) \
302 { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, \
303 .context = _context, .min_size = sizeof(_struct), }
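/*
 * Note: the *_NO_SIZE variants leave min_size at 0, so no payload length
 * check is performed for those notifications in iwl_mvm_rx_common().
 */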
306 * Handlers for fw notifications
307 * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, ...)
308 * This list should be in order of frequency for performance purposes.
310 * The handler can run in one of the contexts listed in &iwl_rx_handler_context.
312 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
313 RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
314 struct iwl_mvm_tx_resp),
315 RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
316 struct iwl_mvm_ba_notif),
318 RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
319 iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
320 struct iwl_tlc_update_notif),
322 RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
323 RX_HANDLER_ASYNC_LOCKED_WIPHY,
324 struct iwl_bt_coex_profile_notif),
325 RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
326 RX_HANDLER_ASYNC_LOCKED),
327 RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
328 RX_HANDLER_ASYNC_LOCKED),
330 RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
331 iwl_mvm_handle_rx_system_oper_stats,
332 RX_HANDLER_ASYNC_LOCKED_WIPHY,
333 struct iwl_system_statistics_notif_oper),
334 RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
335 iwl_mvm_handle_rx_system_oper_part1_stats,
336 RX_HANDLER_ASYNC_LOCKED,
337 struct iwl_system_statistics_part1_notif_oper),
338 RX_HANDLER_GRP(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF,
339 iwl_mvm_handle_rx_system_end_stats_notif,
340 RX_HANDLER_ASYNC_LOCKED,
341 struct iwl_system_statistics_end_notif),
343 RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
344 iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
345 struct iwl_ba_window_status_notif),
347 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
348 RX_HANDLER_SYNC, struct iwl_time_event_notif),
349 RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
350 iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
351 struct iwl_mvm_session_prot_notif),
352 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
353 RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
355 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
356 struct iwl_mvm_eosp_notification),
358 RX_HANDLER(SCAN_ITERATION_COMPLETE,
359 iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
360 struct iwl_lmac_scan_complete_notif),
361 RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
362 iwl_mvm_rx_lmac_scan_complete_notif,
363 RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
364 RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
365 iwl_mvm_rx_scan_match_found,
367 RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
368 RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
369 RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
370 iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
371 struct iwl_umac_scan_iter_complete_notif),
373 RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
374 RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
376 RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
377 struct iwl_error_resp),
378 RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
379 iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
380 struct iwl_uapsd_misbehaving_ap_notif),
381 RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
382 RX_HANDLER_ASYNC_LOCKED),
383 RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
384 iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
385 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
386 iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
387 struct ct_kill_notif),
389 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
390 RX_HANDLER_ASYNC_LOCKED,
391 struct iwl_tdls_channel_switch_notif),
392 RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
393 RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
394 RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
395 iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
396 struct iwl_ftm_responder_stats),
398 RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
399 iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
400 RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
401 iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
403 RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
404 iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
405 struct iwl_mfu_assert_dump_notif),
406 RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
407 iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
408 struct iwl_stored_beacon_notif_v2),
409 RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
410 iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
411 struct iwl_mu_group_mgmt_notif),
412 RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
413 iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
414 struct iwl_mvm_pm_state_notification),
415 RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
416 iwl_mvm_probe_resp_data_notif,
417 RX_HANDLER_ASYNC_LOCKED,
418 struct iwl_probe_resp_data_notif),
419 RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
420 iwl_mvm_channel_switch_start_notif,
421 RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
422 RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF,
423 iwl_mvm_channel_switch_error_notif,
424 RX_HANDLER_ASYNC_UNLOCKED,
425 struct iwl_channel_switch_error_notif),
426 RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
427 iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
428 struct iwl_datapath_monitor_notif),
430 RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
431 iwl_mvm_rx_thermal_dual_chain_req,
432 RX_HANDLER_ASYNC_LOCKED,
433 struct iwl_thermal_dual_chain_request),
435 RX_HANDLER_GRP(SYSTEM_GROUP, RFI_DEACTIVATE_NOTIF,
436 iwl_rfi_deactivate_notif_handler, RX_HANDLER_ASYNC_UNLOCKED,
437 struct iwl_rfi_deactivate_notif),
439 RX_HANDLER_GRP(LEGACY_GROUP,
440 WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION,
441 iwl_mvm_time_sync_msmt_event, RX_HANDLER_SYNC,
442 struct iwl_time_msmt_notify),
443 RX_HANDLER_GRP(LEGACY_GROUP,
444 WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION,
445 iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
446 struct iwl_time_msmt_cfm_notify),
447 RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
448 iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
449 struct iwl_roc_notif),
452 #undef RX_HANDLER_GRP
454 /* Please keep this array *SORTED* by hex value.
455 * Access is done through binary search
457 static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
458 HCMD_NAME(UCODE_ALIVE_NTFY),
459 HCMD_NAME(REPLY_ERROR),
461 HCMD_NAME(INIT_COMPLETE_NOTIF),
462 HCMD_NAME(PHY_CONTEXT_CMD),
464 HCMD_NAME(SCAN_CFG_CMD),
465 HCMD_NAME(SCAN_REQ_UMAC),
466 HCMD_NAME(SCAN_ABORT_UMAC),
467 HCMD_NAME(SCAN_COMPLETE_UMAC),
468 HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
469 HCMD_NAME(ADD_STA_KEY),
471 HCMD_NAME(REMOVE_STA),
473 HCMD_NAME(SCD_QUEUE_CFG),
474 HCMD_NAME(TXPATH_FLUSH),
475 HCMD_NAME(MGMT_MCAST_KEY),
477 HCMD_NAME(SHARED_MEM_CFG),
478 HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
479 HCMD_NAME(MAC_CONTEXT_CMD),
480 HCMD_NAME(TIME_EVENT_CMD),
481 HCMD_NAME(TIME_EVENT_NOTIFICATION),
482 HCMD_NAME(BINDING_CONTEXT_CMD),
483 HCMD_NAME(TIME_QUOTA_CMD),
484 HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
487 HCMD_NAME(FW_PAGING_BLOCK_CMD),
488 HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
489 HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
490 HCMD_NAME(HOT_SPOT_CMD),
491 HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
492 HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
493 HCMD_NAME(BT_COEX_CI),
494 HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
495 HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
496 HCMD_NAME(PHY_CONFIGURATION_CMD),
497 HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
498 HCMD_NAME(PHY_DB_CMD),
499 HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
500 HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
501 HCMD_NAME(POWER_TABLE_CMD),
502 HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
503 HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
504 HCMD_NAME(NVM_ACCESS_CMD),
505 HCMD_NAME(BEACON_NOTIFICATION),
506 HCMD_NAME(BEACON_TEMPLATE_CMD),
507 HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
508 HCMD_NAME(BT_CONFIG),
509 HCMD_NAME(STATISTICS_CMD),
510 HCMD_NAME(STATISTICS_NOTIFICATION),
511 HCMD_NAME(EOSP_NOTIFICATION),
512 HCMD_NAME(REDUCE_TX_POWER_CMD),
513 HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
514 HCMD_NAME(TDLS_CONFIG_CMD),
515 HCMD_NAME(MAC_PM_POWER_TABLE),
516 HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
517 HCMD_NAME(MFUART_LOAD_NOTIFICATION),
518 HCMD_NAME(RSS_CONFIG_CMD),
519 HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
520 HCMD_NAME(REPLY_RX_PHY_CMD),
521 HCMD_NAME(REPLY_RX_MPDU_CMD),
522 HCMD_NAME(BAR_FRAME_RELEASE),
523 HCMD_NAME(FRAME_RELEASE),
525 HCMD_NAME(MCC_UPDATE_CMD),
526 HCMD_NAME(MCC_CHUB_UPDATE_CMD),
527 HCMD_NAME(MARKER_CMD),
528 HCMD_NAME(BT_PROFILE_NOTIFICATION),
529 HCMD_NAME(MCAST_FILTER_CMD),
530 HCMD_NAME(REPLY_SF_CFG_CMD),
531 HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
532 HCMD_NAME(D3_CONFIG_CMD),
533 HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
534 HCMD_NAME(MATCH_FOUND_NOTIFICATION),
535 HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
536 HCMD_NAME(WOWLAN_PATTERNS),
537 HCMD_NAME(WOWLAN_CONFIGURATION),
538 HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
539 HCMD_NAME(WOWLAN_TKIP_PARAM),
540 HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
541 HCMD_NAME(WOWLAN_GET_STATUSES),
542 HCMD_NAME(SCAN_ITERATION_COMPLETE),
543 HCMD_NAME(D0I3_END_CMD),
544 HCMD_NAME(LTR_CONFIG),
545 HCMD_NAME(LDBG_CONFIG_CMD),
548 /* Please keep this array *SORTED* by hex value.
549 * Access is done through binary search
551 static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
552 HCMD_NAME(SHARED_MEM_CFG_CMD),
553 HCMD_NAME(INIT_EXTENDED_CFG_CMD),
554 HCMD_NAME(FW_ERROR_RECOVERY_CMD),
555 HCMD_NAME(RFI_CONFIG_CMD),
556 HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
557 HCMD_NAME(SYSTEM_FEATURES_CONTROL_CMD),
558 HCMD_NAME(SYSTEM_STATISTICS_CMD),
559 HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
560 HCMD_NAME(RFI_DEACTIVATE_NOTIF),
563 /* Please keep this array *SORTED* by hex value.
564 * Access is done through binary search
566 static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
567 HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
568 HCMD_NAME(SESSION_PROTECTION_CMD),
569 HCMD_NAME(MAC_CONFIG_CMD),
570 HCMD_NAME(LINK_CONFIG_CMD),
571 HCMD_NAME(STA_CONFIG_CMD),
572 HCMD_NAME(AUX_STA_CMD),
573 HCMD_NAME(STA_REMOVE_CMD),
574 HCMD_NAME(STA_DISABLE_TX_CMD),
576 HCMD_NAME(ROC_NOTIF),
577 HCMD_NAME(SESSION_PROTECTION_NOTIF),
578 HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
581 /* Please keep this array *SORTED* by hex value.
582 * Access is done through binary search
584 static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
585 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
586 HCMD_NAME(CTDP_CONFIG_CMD),
587 HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
588 HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
589 HCMD_NAME(CT_KILL_NOTIFICATION),
590 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
593 /* Please keep this array *SORTED* by hex value.
594 * Access is done through binary search
596 static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
597 HCMD_NAME(DQA_ENABLE_CMD),
598 HCMD_NAME(UPDATE_MU_GROUPS_CMD),
599 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
600 HCMD_NAME(STA_HE_CTXT_CMD),
601 HCMD_NAME(RLC_CONFIG_CMD),
602 HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
603 HCMD_NAME(TLC_MNG_CONFIG_CMD),
604 HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
605 HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
606 HCMD_NAME(SEC_KEY_CMD),
607 HCMD_NAME(MONITOR_NOTIF),
608 HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
609 HCMD_NAME(STA_PM_NOTIF),
610 HCMD_NAME(MU_GROUP_MGMT_NOTIF),
611 HCMD_NAME(RX_QUEUES_NOTIFICATION),
614 /* Please keep this array *SORTED* by hex value.
615 * Access is done through binary search
617 static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
618 HCMD_NAME(STATISTICS_OPER_NOTIF),
619 HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
622 /* Please keep this array *SORTED* by hex value.
623 * Access is done through binary search
625 static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
626 HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
629 /* Please keep this array *SORTED* by hex value.
630 * Access is done through binary search
632 static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
633 HCMD_NAME(TOF_RANGE_REQ_CMD),
634 HCMD_NAME(TOF_CONFIG_CMD),
635 HCMD_NAME(TOF_RANGE_ABORT_CMD),
636 HCMD_NAME(TOF_RANGE_REQ_EXT_CMD),
637 HCMD_NAME(TOF_RESPONDER_CONFIG_CMD),
638 HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD),
639 HCMD_NAME(TOF_LC_NOTIF),
640 HCMD_NAME(TOF_RESPONDER_STATS),
641 HCMD_NAME(TOF_MCSI_DEBUG_NOTIF),
642 HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
645 /* Please keep this array *SORTED* by hex value.
646 * Access is done through binary search
648 static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
649 HCMD_NAME(WOWLAN_WAKE_PKT_NOTIFICATION),
650 HCMD_NAME(WOWLAN_INFO_NOTIFICATION),
651 HCMD_NAME(D3_END_NOTIFICATION),
652 HCMD_NAME(STORED_BEACON_NTF),
655 /* Please keep this array *SORTED* by hex value.
656 * Access is done through binary search
658 static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
659 HCMD_NAME(NVM_ACCESS_COMPLETE),
660 HCMD_NAME(NVM_GET_INFO),
661 HCMD_NAME(TAS_CONFIG),
664 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
665 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
666 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
667 [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
668 [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
669 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
670 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
671 [SCAN_GROUP] = HCMD_ARR(iwl_mvm_scan_names),
672 [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names),
673 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
674 [REGULATORY_AND_NVM_GROUP] =
675 HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
676 [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
679 /* this forward declaration avoids having to export the function */
680 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
681 static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
682 struct wiphy_work *work);
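/*
 * Look up the TX power backoff to apply for this platform: walk the
 * per-device pwr_tx_backoffs table against the power limit reported by the
 * BIOS/ACPI tables.
 */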
684 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
686 const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
692 iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
694 while (backoff->pwr) {
695 if (dflt_pwr_limit >= backoff->pwr)
696 return backoff->backoff;
704 static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
706 struct iwl_mvm *mvm =
707 container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
708 struct ieee80211_vif *tx_blocked_vif;
709 struct iwl_mvm_vif *mvmvif;
711 mutex_lock(&mvm->mutex);
714 rcu_dereference_protected(mvm->csa_tx_blocked_vif,
715 lockdep_is_held(&mvm->mutex));
720 mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
721 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
722 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
724 mutex_unlock(&mvm->mutex);
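/*
 * The following callbacks are handed to the firmware runtime via
 * iwl_fw_runtime_init(); they let the common fw/debug code take the mvm
 * mutex, check the firmware state and send host commands on our behalf.
 */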
727 static void iwl_mvm_fwrt_dump_start(void *ctx)
729 struct iwl_mvm *mvm = ctx;
731 mutex_lock(&mvm->mutex);
734 static void iwl_mvm_fwrt_dump_end(void *ctx)
736 struct iwl_mvm *mvm = ctx;
738 mutex_unlock(&mvm->mutex);
741 static bool iwl_mvm_fwrt_fw_running(void *ctx)
743 return iwl_mvm_firmware_running(ctx);
746 static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
748 struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
751 mutex_lock(&mvm->mutex);
752 ret = iwl_mvm_send_cmd(mvm, host_cmd);
753 mutex_unlock(&mvm->mutex);
758 static bool iwl_mvm_d3_debug_enable(void *ctx)
760 return IWL_MVM_D3_DEBUG;
763 static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
764 .dump_start = iwl_mvm_fwrt_dump_start,
765 .dump_end = iwl_mvm_fwrt_dump_end,
766 .fw_running = iwl_mvm_fwrt_fw_running,
767 .send_hcmd = iwl_mvm_fwrt_send_hcmd,
768 .d3_debug_enable = iwl_mvm_d3_debug_enable,
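/*
 * First stage of startup: obtain NVM data, either from CSME through iwlmei
 * (when CSME owns the device) or by running the INIT firmware on the device.
 */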
771 static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
773 struct iwl_trans *trans = mvm->trans;
776 if (trans->csme_own) {
777 if (WARN(!mvm->mei_registered,
778 "csme is owner, but we aren't registered to iwlmei\n"))
779 goto get_nvm_from_fw;
781 mvm->mei_nvm_data = iwl_mei_get_nvm();
782 if (mvm->mei_nvm_data) {
784 * mvm->mei_nvm_data is set and because of that,
785 * we'll load the NVM from the FW when we get ownership.
789 iwl_parse_mei_nvm_data(trans, trans->cfg,
798 "Got a NULL NVM from CSME, trying to get it from the device\n");
803 wiphy_lock(mvm->hw->wiphy);
804 mutex_lock(&mvm->mutex);
806 ret = iwl_trans_start_hw(mvm->trans);
808 mutex_unlock(&mvm->mutex);
809 wiphy_unlock(mvm->hw->wiphy);
814 ret = iwl_run_init_mvm_ucode(mvm);
815 if (ret && ret != -ERFKILL)
816 iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
817 if (!ret && iwl_mvm_is_lar_supported(mvm)) {
818 mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
819 ret = iwl_mvm_init_mcc(mvm);
822 if (!iwlmvm_mod_params.init_dbg || !ret)
823 iwl_mvm_stop_device(mvm);
825 mutex_unlock(&mvm->mutex);
826 wiphy_unlock(mvm->hw->wiphy);
830 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
832 /* no longer need this regardless of failure or not */
833 mvm->pldr_sync = false;
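/*
 * Second stage of startup, once NVM data is available: register with
 * mac80211, create debugfs entries and propagate the current MEI rfkill
 * state.
 */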
838 static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
840 struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
843 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
845 ret = iwl_mvm_mac_setup_register(mvm);
849 mvm->hw_registered = true;
851 iwl_mvm_dbgfs_register(mvm);
853 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
854 mvm->mei_rfkill_blocked,
855 RFKILL_HARD_BLOCK_NOT_OWNER);
857 iwl_mvm_mei_set_sw_rfkill_state(mvm);
862 struct iwl_mvm_frob_txf_data {
867 static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
868 struct ieee80211_vif *vif,
869 struct ieee80211_sta *sta,
870 struct ieee80211_key_conf *key,
873 struct iwl_mvm_frob_txf_data *txf = data;
874 u8 keylen, match, matchend;
878 switch (key->cipher) {
879 case WLAN_CIPHER_SUITE_CCMP:
881 keylen = key->keylen;
883 case WLAN_CIPHER_SUITE_WEP40:
884 case WLAN_CIPHER_SUITE_WEP104:
885 case WLAN_CIPHER_SUITE_TKIP:
887 * WEP has short keys which might show up in the payload,
888 * and then you can deduce the key, so in this case just
889 * remove all FIFO data.
890 * For TKIP, we don't know the phase 2 keys here, so same.
892 memset(txf->buf, 0xBB, txf->buflen);
898 /* scan for key material and clear it out */
900 for (i = 0; i < txf->buflen; i++) {
901 if (txf->buf[i] != keydata[match]) {
906 if (match == keylen) {
907 memset(txf->buf + i - keylen, 0xAA, keylen);
912 /* we're dealing with a FIFO, so check wrapped around data */
914 for (i = 0; match && i < keylen - match; i++) {
915 if (txf->buf[i] != keydata[match])
918 if (match == keylen) {
919 memset(txf->buf, 0xAA, i + 1);
920 memset(txf->buf + txf->buflen - matchend, 0xAA,
927 static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
929 struct iwl_mvm_frob_txf_data txf = {
933 struct iwl_mvm *mvm = ctx;
935 /* embedded key material exists only on old API */
936 if (iwl_mvm_has_new_tx_api(mvm))
940 ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
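/*
 * Blank out sensitive host command payloads (WoWLAN key material and the
 * IGTK part of MGMT_MCAST_KEY) in captured command data before it is dumped.
 */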
944 static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
946 /* we only use wide headers for commands */
947 struct iwl_cmd_header_wide *hdr = hcmd;
948 unsigned int frob_start = sizeof(*hdr), frob_end = 0;
950 if (len < sizeof(*hdr))
953 /* all the commands we care about are in LONG_GROUP */
954 if (hdr->group_id != LONG_GROUP)
959 case WOWLAN_TKIP_PARAM:
960 case WOWLAN_KEK_KCK_MATERIAL:
963 * blank out everything here, easier than dealing
964 * with the various versions of the command
969 frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
970 BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
971 offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
973 frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
974 BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
975 offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
979 if (frob_start >= frob_end)
985 memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
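/*
 * Blank out memory regions that the firmware image marks as excluded from
 * dumps (dump_excl / dump_excl_wowlan), clipped to the dumped window.
 */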
988 static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
990 const struct iwl_dump_exclude *excl;
991 struct iwl_mvm *mvm = ctx;
994 switch (mvm->fwrt.cur_fw_img) {
999 case IWL_UCODE_REGULAR:
1000 case IWL_UCODE_REGULAR_USNIFFER:
1001 excl = mvm->fw->dump_excl;
1003 case IWL_UCODE_WOWLAN:
1004 excl = mvm->fw->dump_excl_wowlan;
1008 BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
1009 sizeof(mvm->fw->dump_excl_wowlan));
1011 for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
1014 if (!excl[i].addr || !excl[i].size)
1017 start = excl[i].addr;
1018 end = start + excl[i].size;
1020 if (end <= mem_addr || start >= mem_addr + buflen)
1023 if (start < mem_addr)
1026 if (end > mem_addr + buflen)
1027 end = mem_addr + buflen;
1029 memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
1033 static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
1034 .frob_txf = iwl_mvm_frob_txf,
1035 .frob_hcmd = iwl_mvm_frob_hcmd,
1036 .frob_mem = iwl_mvm_frob_mem,
1039 static void iwl_mvm_me_conn_status(void *priv, const struct iwl_mei_conn_info *conn_info)
1041 struct iwl_mvm *mvm = priv;
1042 struct iwl_mvm_csme_conn_info *prev_conn_info, *curr_conn_info;
1045 * This is protected by the guarantee that this function will not be
1046 * called concurrently on two different threads.
1048 prev_conn_info = rcu_dereference_protected(mvm->csme_conn_info, true);
1050 curr_conn_info = kzalloc(sizeof(*curr_conn_info), GFP_KERNEL);
1051 if (!curr_conn_info)
1054 curr_conn_info->conn_info = *conn_info;
1056 rcu_assign_pointer(mvm->csme_conn_info, curr_conn_info);
1059 kfree_rcu(prev_conn_info, rcu_head);
1062 static void iwl_mvm_mei_rfkill(void *priv, bool blocked,
1063 bool csme_taking_ownership)
1065 struct iwl_mvm *mvm = priv;
1067 if (blocked && !csme_taking_ownership)
1070 mvm->mei_rfkill_blocked = blocked;
1071 if (!mvm->hw_registered)
1074 wiphy_rfkill_set_hw_state_reason(mvm->hw->wiphy,
1075 mvm->mei_rfkill_blocked,
1076 RFKILL_HARD_BLOCK_NOT_OWNER);
1079 static void iwl_mvm_mei_roaming_forbidden(void *priv, bool forbidden)
1081 struct iwl_mvm *mvm = priv;
1083 if (!mvm->hw_registered || !mvm->csme_vif)
1086 iwl_mvm_send_roaming_forbidden_event(mvm, mvm->csme_vif, forbidden);
1089 static void iwl_mvm_sap_connected_wk(struct work_struct *wk)
1091 struct iwl_mvm *mvm =
1092 container_of(wk, struct iwl_mvm, sap_connected_wk);
1095 ret = iwl_mvm_start_get_nvm(mvm);
1099 ret = iwl_mvm_start_post_nvm(mvm);
1106 IWL_ERR(mvm, "Couldn't get started...\n");
1107 iwl_mei_start_unregister();
1108 iwl_mei_unregister_complete();
1109 iwl_fw_flush_dumps(&mvm->fwrt);
1110 iwl_mvm_thermal_exit(mvm);
1111 iwl_fw_runtime_free(&mvm->fwrt);
1112 iwl_phy_db_free(mvm->phy_db);
1113 kfree(mvm->scan_cmd);
1114 iwl_trans_op_mode_leave(mvm->trans);
1115 kfree(mvm->nvm_data);
1116 kfree(mvm->mei_nvm_data);
1118 ieee80211_free_hw(mvm->hw);
1121 static void iwl_mvm_mei_sap_connected(void *priv)
1123 struct iwl_mvm *mvm = priv;
1125 if (!mvm->hw_registered)
1126 schedule_work(&mvm->sap_connected_wk);
1129 static void iwl_mvm_mei_nic_stolen(void *priv)
1131 struct iwl_mvm *mvm = priv;
1134 cfg80211_shutdown_all_interfaces(mvm->hw->wiphy);
1138 static const struct iwl_mei_ops mei_ops = {
1139 .me_conn_status = iwl_mvm_me_conn_status,
1140 .rfkill = iwl_mvm_mei_rfkill,
1141 .roaming_forbidden = iwl_mvm_mei_roaming_forbidden,
1142 .sap_connected = iwl_mvm_mei_sap_connected,
1143 .nic_stolen = iwl_mvm_mei_nic_stolen,
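/*
 * Op-mode start: allocate ieee80211_hw + iwl_mvm, initialize locks, work
 * items and the transport configuration, then run the two startup stages
 * (iwl_mvm_start_get_nvm() and iwl_mvm_start_post_nvm()) above.
 */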
1146 static struct iwl_op_mode *
1147 iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
1148 const struct iwl_fw *fw, struct dentry *dbgfs_dir)
1150 struct ieee80211_hw *hw;
1151 struct iwl_op_mode *op_mode;
1152 struct iwl_mvm *mvm;
1153 struct iwl_trans_config trans_cfg = {};
1154 static const u8 no_reclaim_cmds[] = {
1160 struct iwl_mvm_csme_conn_info *csme_conn_info __maybe_unused;
1163 * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
1164 * index all over the driver - check that its value corresponds to the array size.
1167 BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
1168 IWL_MVM_STATION_COUNT_MAX);
1170 /********************************
1171 * 1. Allocating and configuring HW data
1172 ********************************/
1173 hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
1174 sizeof(struct iwl_mvm),
1175 iwl_mvm_has_mld_api(fw) ? &iwl_mvm_mld_hw_ops :
1180 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1183 max_agg = IEEE80211_MAX_AMPDU_BUF_HE;
1185 hw->max_rx_aggregation_subframes = max_agg;
1187 if (cfg->max_tx_agg_size)
1188 hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
1190 hw->max_tx_aggregation_subframes = max_agg;
1194 mvm = IWL_OP_MODE_GET_MVM(op_mode);
1195 mvm->dev = trans->dev;
1201 iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
1202 &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
1204 iwl_mvm_get_bios_tables(mvm);
1205 iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
1206 iwl_uefi_get_step_table(trans);
1208 mvm->init_status = 0;
1210 if (iwl_mvm_has_new_rx_api(mvm)) {
1211 op_mode->ops = &iwl_mvm_ops_mq;
1212 trans->rx_mpdu_cmd_hdr_size =
1213 (trans->trans_cfg->device_family >=
1214 IWL_DEVICE_FAMILY_AX210) ?
1215 sizeof(struct iwl_rx_mpdu_desc) :
1216 IWL_RX_DESC_SIZE_V1;
1218 op_mode->ops = &iwl_mvm_ops;
1219 trans->rx_mpdu_cmd_hdr_size =
1220 sizeof(struct iwl_rx_mpdu_res_start);
1222 if (WARN_ON(trans->num_rx_queues > 1))
1226 mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
1228 if (iwl_mvm_has_new_tx_api(mvm)) {
1230 * If we have the new TX/queue allocation API initialize them
1231 * all to invalid numbers. We'll rewrite the ones that we need
1232 * later, but that doesn't happen for all of them all of the
1233 * time (e.g. P2P Device is optional), and if a dynamic queue
1234 * ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
1235 * iwl_mvm_is_static_queue() erroneously returns true, and we
1236 * might have things getting stuck.
1238 mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
1239 mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
1240 mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
1241 mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
1243 mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
1244 mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
1245 mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
1246 mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
1249 mvm->sf_state = SF_UNINIT;
1250 if (iwl_mvm_has_unified_ucode(mvm))
1251 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
1253 iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
1254 mvm->drop_bcn_ap_mode = true;
1256 mutex_init(&mvm->mutex);
1257 spin_lock_init(&mvm->async_handlers_lock);
1258 INIT_LIST_HEAD(&mvm->time_event_list);
1259 INIT_LIST_HEAD(&mvm->aux_roc_te_list);
1260 INIT_LIST_HEAD(&mvm->async_handlers_list);
1261 spin_lock_init(&mvm->time_event_lock);
1262 INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
1263 INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
1264 INIT_LIST_HEAD(&mvm->resp_pasn_list);
1266 INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
1267 INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
1268 INIT_WORK(&mvm->sap_connected_wk, iwl_mvm_sap_connected_wk);
1269 INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
1270 INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
1271 INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
1272 INIT_LIST_HEAD(&mvm->add_stream_txqs);
1273 spin_lock_init(&mvm->add_stream_lock);
1275 wiphy_work_init(&mvm->async_handlers_wiphy_wk,
1276 iwl_mvm_async_handlers_wiphy_wk);
1277 init_waitqueue_head(&mvm->rx_sync_waitq);
1279 mvm->queue_sync_state = 0;
1281 SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
1283 spin_lock_init(&mvm->tcm.lock);
1284 INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
1285 mvm->tcm.ts = jiffies;
1286 mvm->tcm.ll_ts = jiffies;
1287 mvm->tcm.uapsd_nonagg_ts = jiffies;
1289 INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
1291 mvm->cmd_ver.range_resp =
1292 iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
1293 TOF_RANGE_RESPONSE_NOTIF, 5);
1294 /* we only support up to version 9 */
1295 if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
1299 * Populate the state variables that the transport layer needs
1302 trans_cfg.op_mode = op_mode;
1303 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1304 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1306 switch (iwlwifi_mod_params.amsdu_size) {
1308 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1311 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1314 trans_cfg.rx_buf_size = IWL_AMSDU_8K;
1317 trans_cfg.rx_buf_size = IWL_AMSDU_12K;
1320 pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
1321 iwlwifi_mod_params.amsdu_size);
1322 trans_cfg.rx_buf_size = IWL_AMSDU_4K;
1325 trans->wide_cmd_header = true;
1326 trans_cfg.bc_table_dword =
1327 mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210;
1329 trans_cfg.command_groups = iwl_mvm_groups;
1330 trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
1332 trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
1333 trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
1334 trans_cfg.scd_set_active = true;
1336 trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
1339 /* Set a short watchdog for the command queue */
1340 trans_cfg.cmd_q_wdg_timeout =
1341 iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
1343 snprintf(mvm->hw->wiphy->fw_version,
1344 sizeof(mvm->hw->wiphy->fw_version),
1345 "%.31s", fw->fw_version);
1347 trans_cfg.fw_reset_handshake = fw_has_capa(&mvm->fw->ucode_capa,
1348 IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE);
1350 trans_cfg.queue_alloc_cmd_ver =
1351 iwl_fw_lookup_cmd_ver(mvm->fw,
1352 WIDE_ID(DATA_PATH_GROUP,
1353 SCD_QUEUE_CONFIG_CMD),
1355 mvm->sta_remove_requires_queue_remove =
1356 trans_cfg.queue_alloc_cmd_ver > 0;
1358 mvm->mld_api_is_used = iwl_mvm_has_mld_api(mvm->fw);
1360 /* Configure transport layer */
1361 iwl_trans_configure(mvm->trans, &trans_cfg);
1363 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
1364 trans->dbg.dest_tlv = mvm->fw->dbg.dest_tlv;
1365 trans->dbg.n_dest_reg = mvm->fw->dbg.n_dest_reg;
1366 memcpy(trans->dbg.conf_tlv, mvm->fw->dbg.conf_tlv,
1367 sizeof(trans->dbg.conf_tlv));
1368 trans->dbg.trigger_tlv = mvm->fw->dbg.trigger_tlv;
1370 trans->iml = mvm->fw->iml;
1371 trans->iml_len = mvm->fw->iml_len;
1373 /* set up notification wait support */
1374 iwl_notification_wait_init(&mvm->notif_wait);
1377 mvm->phy_db = iwl_phy_db_init(trans);
1379 IWL_ERR(mvm, "Cannot init phy_db\n");
1383 IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
1384 mvm->trans->name, mvm->trans->hw_rev);
1386 if (iwlwifi_mod_params.nvm_file)
1387 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
1389 IWL_DEBUG_EEPROM(mvm->trans->dev,
1390 "working without external nvm file\n");
1392 scan_size = iwl_mvm_scan_size(mvm);
1394 mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
1397 mvm->scan_cmd_size = scan_size;
1399 /* invalidate ids to prevent accidental removal of sta_id 0 */
1400 mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
1401 mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
1403 /* Set EBS as successful as long as not stated otherwise by the FW. */
1404 mvm->last_ebs_successful = true;
1406 min_backoff = iwl_mvm_min_backoff(mvm);
1407 iwl_mvm_thermal_initialize(mvm, min_backoff);
1409 if (!iwl_mvm_has_new_rx_stats_api(mvm))
1410 memset(&mvm->rx_stats_v3, 0,
1411 sizeof(struct mvm_statistics_rx_v3));
1413 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
1415 iwl_mvm_ftm_initiator_smooth_config(mvm);
1417 iwl_mvm_init_time_sync(&mvm->time_sync);
1419 mvm->debugfs_dir = dbgfs_dir;
1421 mvm->mei_registered = !iwl_mei_register(mvm, &mei_ops);
1423 iwl_mvm_mei_scan_filter_init(&mvm->mei_scan_filter);
1425 if (iwl_mvm_start_get_nvm(mvm)) {
1427 * Getting the NVM failed while CSME is the owner, but since we are
1428 * registered to MEI, we'll get the NVM later when it becomes
1429 * possible to get it from CSME.
1431 if (trans->csme_own && mvm->mei_registered)
1434 goto out_thermal_exit;
1438 if (iwl_mvm_start_post_nvm(mvm))
1439 goto out_thermal_exit;
1444 iwl_mvm_thermal_exit(mvm);
1445 if (mvm->mei_registered) {
1446 iwl_mei_start_unregister();
1447 iwl_mei_unregister_complete();
1450 iwl_fw_flush_dumps(&mvm->fwrt);
1451 iwl_fw_runtime_free(&mvm->fwrt);
1453 if (iwlmvm_mod_params.init_dbg)
1455 iwl_phy_db_free(mvm->phy_db);
1456 kfree(mvm->scan_cmd);
1457 iwl_trans_op_mode_leave(trans);
1459 ieee80211_free_hw(mvm->hw);
1463 void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1465 lockdep_assert_held(&mvm->mutex);
1467 iwl_fw_cancel_timestamp(&mvm->fwrt);
1469 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1471 iwl_fw_dbg_stop_sync(&mvm->fwrt);
1472 iwl_trans_stop_device(mvm->trans);
1473 iwl_free_fw_paging(&mvm->fwrt);
1474 iwl_fw_dump_conf_clear(&mvm->fwrt);
1475 iwl_mvm_mei_device_state(mvm, false);
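/*
 * Op-mode teardown: unregister from iwlmei and mac80211 (if we got that
 * far), free all driver state and leave the transport.
 */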
1478 static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
1480 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1483 if (mvm->mei_registered) {
1485 iwl_mei_set_netdev(NULL);
1487 iwl_mei_start_unregister();
1491 * After we unregister from mei, the worker can't be scheduled anymore.
1494 cancel_work_sync(&mvm->sap_connected_wk);
1496 iwl_mvm_leds_exit(mvm);
1498 iwl_mvm_thermal_exit(mvm);
1501 * If we couldn't get ownership on the device and we couldn't
1502 * get the NVM from CSME, we haven't registered to mac80211.
1503 * In that case, we didn't fail op_mode_start, because we are
1504 * waiting for CSME to allow us to get the NVM to register to
1505 * mac80211. If that didn't happen, we haven't registered to
1506 * mac80211, hence the if below.
1508 if (mvm->hw_registered)
1509 ieee80211_unregister_hw(mvm->hw);
1511 kfree(mvm->scan_cmd);
1512 kfree(mvm->mcast_filter_cmd);
1513 mvm->mcast_filter_cmd = NULL;
1515 kfree(mvm->error_recovery_buf);
1516 mvm->error_recovery_buf = NULL;
1518 iwl_mvm_ptp_remove(mvm);
1520 iwl_trans_op_mode_leave(mvm->trans);
1522 iwl_phy_db_free(mvm->phy_db);
1525 kfree(mvm->nvm_data);
1526 kfree(mvm->mei_nvm_data);
1527 kfree(rcu_access_pointer(mvm->csme_conn_info));
1528 kfree(mvm->temp_nvm_data);
1529 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
1530 kfree(mvm->nvm_sections[i].data);
1532 cancel_delayed_work_sync(&mvm->tcm.work);
1534 iwl_fw_runtime_free(&mvm->fwrt);
1535 mutex_destroy(&mvm->mutex);
1537 if (mvm->mei_registered)
1538 iwl_mei_unregister_complete();
1540 ieee80211_free_hw(mvm->hw);
1543 struct iwl_async_handler_entry {
1544 struct list_head list;
1545 struct iwl_rx_cmd_buffer rxb;
1546 enum iwl_rx_handler_context context;
1547 void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1550 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
1552 struct iwl_async_handler_entry *entry, *tmp;
1554 spin_lock_bh(&mvm->async_handlers_lock);
1555 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
1556 iwl_free_rxb(&entry->rxb);
1557 list_del(&entry->list);
1560 spin_unlock_bh(&mvm->async_handlers_lock);
1564 * This function receives a bitmap of rx async handler contexts
1565 * (&iwl_rx_handler_context) to handle, and runs only them
1567 static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
1570 struct iwl_async_handler_entry *entry, *tmp;
1571 LIST_HEAD(local_list);
1574 * Sync with Rx path with a lock. Remove all the entries of the
1575 * wanted contexts from this list, add them to a local one (lock free),
1576 * and then handle them.
1578 spin_lock_bh(&mvm->async_handlers_lock);
1579 list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
1580 if (!(BIT(entry->context) & contexts))
1582 list_del(&entry->list);
1583 list_add_tail(&entry->list, &local_list);
1585 spin_unlock_bh(&mvm->async_handlers_lock);
1587 list_for_each_entry_safe(entry, tmp, &local_list, list) {
1588 if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
1589 mutex_lock(&mvm->mutex);
1590 entry->fn(mvm, &entry->rxb);
1591 iwl_free_rxb(&entry->rxb);
1592 list_del(&entry->list);
1593 if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
1594 mutex_unlock(&mvm->mutex);
1599 static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
1600 struct wiphy_work *wk)
1602 struct iwl_mvm *mvm =
1603 container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
1604 u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
1606 iwl_mvm_async_handlers_by_context(mvm, contexts);
1609 static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
1611 struct iwl_mvm *mvm =
1612 container_of(wk, struct iwl_mvm, async_handlers_wk);
1613 u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
1614 BIT(RX_HANDLER_ASYNC_UNLOCKED);
1616 iwl_mvm_async_handlers_by_context(mvm, contexts);
1619 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
1620 struct iwl_rx_packet *pkt)
1622 struct iwl_fw_dbg_trigger_tlv *trig;
1623 struct iwl_fw_dbg_trigger_cmd *cmds_trig;
1626 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1627 FW_DBG_TRIGGER_FW_NOTIF);
1631 cmds_trig = (void *)trig->data;
1633 for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
1634 /* don't collect on CMD 0 */
1635 if (!cmds_trig->cmds[i].cmd_id)
1638 if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
1639 cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
1642 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1643 "CMD 0x%02x.%02x received",
1644 pkt->hdr.group_id, pkt->hdr.cmd);
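/*
 * Dispatch a firmware notification: feed notification waiters and debug
 * triggers, then run the matching handler inline (RX_HANDLER_SYNC) or queue
 * it - stealing the RX page - for one of the async workers.
 */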
1649 static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
1650 struct iwl_rx_cmd_buffer *rxb,
1651 struct iwl_rx_packet *pkt)
1653 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1655 union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
1657 iwl_dbg_tlv_time_point(&mvm->fwrt,
1658 IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF, &tp_data);
1659 iwl_mvm_rx_check_trigger(mvm, pkt);
1662 * Do the notification wait before RX handlers so
1663 * even if the RX handler consumes the RXB we have
1664 * access to it in the notification wait entry.
1666 iwl_notification_wait_notify(&mvm->notif_wait, pkt);
1668 for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
1669 const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
1670 struct iwl_async_handler_entry *entry;
1672 if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
1675 if (IWL_FW_CHECK(mvm, pkt_len < rx_h->min_size,
1676 "unexpected notification 0x%04x size %d, need %d\n",
1677 rx_h->cmd_id, pkt_len, rx_h->min_size))
1680 if (rx_h->context == RX_HANDLER_SYNC) {
1685 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1686 /* we can't do much... */
1690 entry->rxb._page = rxb_steal_page(rxb);
1691 entry->rxb._offset = rxb->_offset;
1692 entry->rxb._rx_page_order = rxb->_rx_page_order;
1693 entry->fn = rx_h->fn;
1694 entry->context = rx_h->context;
1695 spin_lock(&mvm->async_handlers_lock);
1696 list_add_tail(&entry->list, &mvm->async_handlers_list);
1697 spin_unlock(&mvm->async_handlers_lock);
1698 if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
1699 wiphy_work_queue(mvm->hw->wiphy,
1700 &mvm->async_handlers_wiphy_wk);
1702 schedule_work(&mvm->async_handlers_wk);
1707 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
1708 struct napi_struct *napi,
1709 struct iwl_rx_cmd_buffer *rxb)
1711 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1712 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1713 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1715 if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1716 iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
1717 else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
1718 iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
1720 iwl_mvm_rx_common(mvm, rxb, pkt);
1723 void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
1724 struct napi_struct *napi,
1725 struct iwl_rx_cmd_buffer *rxb)
1727 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1728 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1729 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
1731 if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
1732 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
1733 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
1734 RX_QUEUES_NOTIFICATION)))
1735 iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
1736 else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
1737 iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
1738 else if (cmd == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE))
1739 iwl_mvm_rx_bar_frame_release(mvm, napi, rxb, 0);
1740 else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
1741 iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
1743 iwl_mvm_rx_common(mvm, rxb, pkt);
1746 static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue)
1748 return queue == mvm->aux_queue || queue == mvm->probe_queue ||
1749 queue == mvm->p2p_dev_queue || queue == mvm->snif_queue;
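/*
 * Propagate a transport queue full/not-full event to mac80211: static
 * (non-station) queues stop/wake all mac80211 queues, while station queues
 * set or clear the per-TXQ stop-full flag and re-kick transmission on wake.
 */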
1752 static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
1753 int hw_queue, bool start)
1755 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1756 struct ieee80211_sta *sta;
1757 struct ieee80211_txq *txq;
1758 struct iwl_mvm_txq *mvmtxq;
1760 unsigned long tid_bitmap;
1761 struct iwl_mvm_sta *mvmsta;
1764 sta_id = iwl_mvm_has_new_tx_api(mvm) ?
1765 mvm->tvqm_info[hw_queue].sta_id :
1766 mvm->queue_info[hw_queue].ra_sta_id;
1768 if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
1773 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1774 if (IS_ERR_OR_NULL(sta))
1776 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1778 if (iwl_mvm_is_static_queue(mvm, hw_queue)) {
1780 ieee80211_stop_queues(mvm->hw);
1781 else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
1782 ieee80211_wake_queues(mvm->hw);
1787 if (iwl_mvm_has_new_tx_api(mvm)) {
1788 int tid = mvm->tvqm_info[hw_queue].txq_tid;
1790 tid_bitmap = BIT(tid);
1792 tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
1795 for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1798 if (tid == IWL_MAX_TID_COUNT)
1799 tid = IEEE80211_NUM_TIDS;
1801 txq = sta->txq[tid];
1802 mvmtxq = iwl_mvm_txq_from_mac80211(txq);
1804 clear_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
1806 set_bit(IWL_MVM_TXQ_STATE_STOP_FULL, &mvmtxq->state);
1808 if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) {
1810 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1819 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1821 iwl_mvm_queue_state_change(op_mode, hw_queue, false);
1824 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
1826 iwl_mvm_queue_state_change(op_mode, hw_queue, true);
1829 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
1831 wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
1832 iwl_mvm_is_radio_killed(mvm));
1835 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
1838 set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1840 clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
1842 iwl_mvm_set_rfkill_state(mvm);
1845 struct iwl_mvm_csme_conn_info *iwl_mvm_get_csme_conn_info(struct iwl_mvm *mvm)
1847 return rcu_dereference_protected(mvm->csme_conn_info,
1848 lockdep_is_held(&mvm->mutex));
1851 static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
1853 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1854 bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
1855 bool unified = iwl_mvm_has_unified_ucode(mvm);
1858 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1859 wake_up(&mvm->rx_sync_waitq);
1861 clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
1864 iwl_mvm_set_rfkill_state(mvm);
1866 /* iwl_run_init_mvm_ucode is waiting for results, abort it. */
1867 if (rfkill_safe_init_done)
1868 iwl_abort_notification_waits(&mvm->notif_wait);
1871 * Don't ask the transport to stop the firmware. We'll do it
1872 * after cfg80211 takes us down.
1878 * Stop the device if we run OPERATIONAL firmware or if we are in the
1879 * middle of the calibrations.
1881 return state && rfkill_safe_init_done;
1884 static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1886 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
1887 struct ieee80211_tx_info *info;
1889 info = IEEE80211_SKB_CB(skb);
1890 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1891 ieee80211_free_txskb(mvm->hw, skb);
1894 struct iwl_mvm_reprobe {
1896 struct work_struct work;
1899 static void iwl_mvm_reprobe_wk(struct work_struct *wk)
1901 struct iwl_mvm_reprobe *reprobe;
1903 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
1904 if (device_reprobe(reprobe->dev))
1905 dev_err(reprobe->dev, "reprobe failed!\n");
1906 put_device(reprobe->dev);
1908 module_put(THIS_MODULE);
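/*
 * Decide how to recover from a firmware error: depending on the current
 * state this collects debug data only, schedules a full device reprobe, or
 * asks mac80211 to restart the hardware.
 */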
1911 void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
1913 iwl_abort_notification_waits(&mvm->notif_wait);
1914 iwl_dbg_tlv_del_timers(mvm->trans);
1917 * This is a bit racy, but worst case we tell mac80211 about
1918 * a stopped/aborted scan when that was already done, which
1919 * is not a problem. It is necessary to abort any OS scan
1920 * here because mac80211 requires having the scan cleared
1921 * before restarting.
1922 * We'll reset the scan_status to NONE in restart cleanup in
1923 * the next start() call from mac80211. If restart isn't called
1924 * (no fw restart) scan status will stay busy.
1926 iwl_mvm_report_scan_aborted(mvm);
1929 * If we're restarting already, don't cycle restarts.
1930 * If INIT fw asserted, it will likely fail again.
1931 * If WoWLAN fw asserted, don't restart either, mac80211
1932 * can't recover this since we're already half suspended.
1934 if (!mvm->fw_restart && fw_error) {
1935 iwl_fw_error_collect(&mvm->fwrt, false);
1936 } else if (test_bit(IWL_MVM_STATUS_STARTING,
1938 IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
1939 } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1940 struct iwl_mvm_reprobe *reprobe;
1943 "Firmware error during reconfiguration - reprobe!\n");
1946 * get a module reference to avoid doing this while unloading
1947 * anyway and to avoid scheduling a work with code that's being removed.
1950 if (!try_module_get(THIS_MODULE)) {
1951 IWL_ERR(mvm, "Module is being unloaded - abort\n");
1955 reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
1957 module_put(THIS_MODULE);
1960 reprobe->dev = get_device(mvm->trans->dev);
1961 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
1962 schedule_work(&reprobe->work);
1963 } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
1965 IWL_ERR(mvm, "HW restart already requested, but not started\n");
1966 } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
1967 mvm->hw_registered &&
1968 !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1969 /* This should be first thing before trying to collect any
1970 * data to avoid endless loops if any HW error happens while
1971 * collecting debug data.
1973 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1975 if (mvm->fw->ucode_capa.error_log_size) {
1976 u32 src_size = mvm->fw->ucode_capa.error_log_size;
1977 u32 src_addr = mvm->fw->ucode_capa.error_log_addr;
1978 u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC);
1981 mvm->error_recovery_buf = recover_buf;
1982 iwl_trans_read_mem_bytes(mvm->trans,
1989 iwl_fw_error_collect(&mvm->fwrt, false);
1991 if (fw_error && mvm->fw_restart > 0) {
1993 ieee80211_restart_hw(mvm->hw);
1994 } else if (mvm->fwrt.trans->dbg.restart_required) {
1995 IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
1996 mvm->fwrt.trans->dbg.restart_required = false;
1997 ieee80211_restart_hw(mvm->hw);
1998 } else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
1999 ieee80211_restart_hw(mvm->hw);
2004 static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
2006 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2008 if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status) &&
2009 !test_and_clear_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE,
2011 iwl_mvm_dump_nic_error_log(mvm);
2014 iwl_fw_error_collect(&mvm->fwrt, true);
2016 * Currently, the only case for sync=true is during
2017 * shutdown, so just stop in this case. If/when that
2018 * changes, we need to be a bit smarter here.
2024 * If the firmware crashes while we're already considering it
2025 * to be dead then don't ask for a restart, that cannot do
2026 * anything useful anyway.
2028 if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
2031 iwl_mvm_nic_restart(mvm, false);
2034 static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
2036 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2039 iwl_mvm_nic_restart(mvm, true);
2042 static void iwl_op_mode_mvm_time_point(struct iwl_op_mode *op_mode,
2043 enum iwl_fw_ini_time_point tp_id,
2044 union iwl_dbg_tlv_tp_data *tp_data)
2046 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2048 iwl_dbg_tlv_time_point(&mvm->fwrt, tp_id, tp_data);
2051 #define IWL_MVM_COMMON_OPS \
2052 /* these could be differentiated */ \
2053 .queue_full = iwl_mvm_stop_sw_queue, \
2054 .queue_not_full = iwl_mvm_wake_sw_queue, \
2055 .hw_rf_kill = iwl_mvm_set_hw_rfkill_state, \
2056 .free_skb = iwl_mvm_free_skb, \
2057 .nic_error = iwl_mvm_nic_error, \
2058 .cmd_queue_full = iwl_mvm_cmd_queue_full, \
2059 .nic_config = iwl_mvm_nic_config, \
2060 /* as we only register one, these MUST be common! */ \
2061 .start = iwl_op_mode_mvm_start, \
2062 .stop = iwl_op_mode_mvm_stop, \
2063 .time_point = iwl_op_mode_mvm_time_point
2065 static const struct iwl_op_mode_ops iwl_mvm_ops = {
2070 static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
2071 struct napi_struct *napi,
2072 struct iwl_rx_cmd_buffer *rxb,
2075 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
2076 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2077 u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
2079 if (unlikely(queue >= mvm->trans->num_rx_queues))
2082 if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
2083 iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
2084 else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
2085 RX_QUEUES_NOTIFICATION)))
2086 iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
2087 else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
2088 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
2091 static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
2093 .rx = iwl_mvm_rx_mq,
2094 .rx_rss = iwl_mvm_rx_mq_rss,