Merge tag 'iwlwifi-next-for-kalle-2021-02-05' of git://git.kernel.org/pub/scm/linux...
author    Kalle Valo <kvalo@codeaurora.org>
          Mon, 8 Feb 2021 16:52:00 +0000 (18:52 +0200)
committer Kalle Valo <kvalo@codeaurora.org>
          Mon, 8 Feb 2021 16:52:00 +0000 (18:52 +0200)
iwlwifi patches intended for v5.12

* Check FW notification sizes for robustness;
* Improvements in the NAPI implementation;
* Implement a workaround for CCA-EXT;
* Add new FW API support;
* Fix a CSA bug;
* Implement PHY integration version parsing;
* A bit of refactoring;
* One more CSA bug fix, this time on the AP side;
* Support for new So devices and a bit of reorg;
* Per Platform Antenna Gain (PPAG) fixes and improvements;
* Improvements in the debug framework;
* Some other clean-ups and small fixes.

# gpg: Signature made Fri 05 Feb 2021 12:04:21 PM EET using RSA key ID 1A3CC5FA
# gpg: Good signature from "Luciano Roth Coelho (Luca) <luca@coelho.fi>"
# gpg:                 aka "Luciano Roth Coelho (Intel) <luciano.coelho@intel.com>"

40 files changed:
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/fw/img.h
drivers/net/wireless/intel/iwlwifi/fw/init.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.h

diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 8280092..e0c7410 100644
@@ -9,7 +9,7 @@
 #include "iwl-prph.h"
 
 /* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX        59
+#define IWL_22000_UCODE_API_MAX        61
 
 /* Lowest firmware API version supported */
 #define IWL_22000_UCODE_API_MIN        39
@@ -42,6 +42,7 @@
 #define IWL_SNJ_A_GF4_A_FW_PRE         "iwlwifi-SoSnj-a0-gf4-a0-"
 #define IWL_SNJ_A_GF_A_FW_PRE          "iwlwifi-SoSnj-a0-gf-a0-"
 #define IWL_SNJ_A_HR_B_FW_PRE          "iwlwifi-SoSnj-a0-hr-b0-"
+#define IWL_SNJ_A_JF_B_FW_PRE          "iwlwifi-SoSnj-a0-jf-b0-"
 #define IWL_MA_A_GF_A_FW_PRE           "iwlwifi-ma-a0-gf-a0-"
 #define IWL_MA_A_MR_A_FW_PRE           "iwlwifi-ma-a0-mr-a0-"
 #define IWL_SNJ_A_MR_A_FW_PRE          "iwlwifi-SoSnj-a0-mr-a0-"
@@ -76,7 +77,9 @@
        IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \
        IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
-#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
+#define IWL_SNJ_A_JF_B_MODULE_FIRMWARE(api) \
+       IWL_SNJ_A_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api)          \
        IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode"
 #define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
        IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
@@ -238,6 +241,44 @@ const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg = {
        .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
 };
 
+const struct iwl_cfg_trans_params iwl_snj_trans_cfg = {
+       .mq_rx_supported = true,
+       .use_tfh = true,
+       .rf_id = true,
+       .gen2 = true,
+       .device_family = IWL_DEVICE_FAMILY_AX210,
+       .base_params = &iwl_ax210_base_params,
+       .umac_prph_offset = 0x300000,
+};
+
+const struct iwl_cfg_trans_params iwl_so_trans_cfg = {
+       .mq_rx_supported = true,
+       .use_tfh = true,
+       .rf_id = true,
+       .gen2 = true,
+       .device_family = IWL_DEVICE_FAMILY_AX210,
+       .base_params = &iwl_ax210_base_params,
+       .umac_prph_offset = 0x300000,
+       .integrated = true,
+       /* TODO: the following values need to be checked */
+       .xtal_latency = 500,
+       .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
+};
+
+const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
+       .mq_rx_supported = true,
+       .use_tfh = true,
+       .rf_id = true,
+       .gen2 = true,
+       .device_family = IWL_DEVICE_FAMILY_AX210,
+       .base_params = &iwl_ax210_base_params,
+       .umac_prph_offset = 0x300000,
+       .integrated = true,
+       /* TODO: the following values need to be checked */
+       .xtal_latency = 12000,
+       .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
+};
+
 /*
  * If the device doesn't support HE, no need to have that many buffers.
  * 22000 devices can split multiple frames into a single RB, so fewer are
@@ -606,9 +647,15 @@ const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
 };
 
-const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = {
-       .name = iwl_ax201_name,
-       .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+const struct iwl_cfg iwl_cfg_snj_hr_b0 = {
+       .fw_name_pre = IWL_SNJ_A_HR_B_FW_PRE,
+       .uhb_supported = true,
+       IWL_DEVICE_AX210,
+       .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_snj_a0_jf_b0 = {
+       .fw_name_pre = IWL_SNJ_A_JF_B_FW_PRE,
        .uhb_supported = true,
        IWL_DEVICE_AX210,
        .num_rbds = IWL_NUM_RBDS_AX210_HE,
@@ -650,6 +697,7 @@ MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
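
The new module-firmware macro composes the ucode filename in the usual way; a
worked expansion with the bumped API maximum, derived from the definitions
above (not additional driver code):

    IWL_SNJ_A_JF_B_MODULE_FIRMWARE(61)
        => IWL_SNJ_A_JF_B_FW_PRE __stringify(61) ".ucode"
        => "iwlwifi-SoSnj-a0-jf-b0-" "61" ".ucode"
        => "iwlwifi-SoSnj-a0-jf-b0-61.ucode"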
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index b472f08..d299bba 100644
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -52,6 +52,12 @@ enum iwl_data_path_subcmd_ids {
         */
        CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14,
 
+       /**
+        * @MONITOR_NOTIF: Datapath monitoring notification, using
+        *      &struct iwl_datapath_monitor_notif
+        */
+       MONITOR_NOTIF = 0xF4,
+
        /**
         * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
         */
@@ -153,4 +159,14 @@ struct iwl_channel_estimation_cfg {
        __le64 frame_types;
 } __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */
 
+enum iwl_datapath_monitor_notif_type {
+       IWL_DP_MON_NOTIF_TYPE_EXT_CCA,
+};
+
+struct iwl_datapath_monitor_notif {
+       __le32 type;
+       u8 mac_id;
+       u8 reserved[3];
+} __packed; /* MONITOR_NTF_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index ace0ef4..8adccd5 100644
@@ -185,6 +185,21 @@ struct iwl_shared_mem_cfg {
        __le32 rxfifo2_control_size;
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_4 */
 
+/**
+ * struct iwl_mfuart_load_notif_v1 - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+*/
+struct iwl_mfuart_load_notif_v1 {
+       __le32 installed_ver;
+       __le32 external_ver;
+       __le32 status;
+       __le32 duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
 /**
  * struct iwl_mfuart_load_notif - mfuart image version & status
  * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
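
The separate _v1 layout presumably exists so the smaller, still-valid payload
size can serve as the minimum for the generic notification-size check added in
mvm/ops.c below. A sketch of the handler-table wiring under that assumption
(the MFUART handler itself is outside this excerpt):

    RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
               RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),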
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 821ed47..2c74db8 100644
@@ -140,7 +140,8 @@ enum iwl_rx_phy_flags {
  * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
  * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension
  *     algorithm
- * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC: this frame is protected using
+ *     CMAC or GMAC
  * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
  * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
  * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
@@ -167,7 +168,7 @@ enum iwl_mvm_rx_status {
        RX_MPDU_RES_STATUS_SEC_CCM_ENC                  = (2 << 8),
        RX_MPDU_RES_STATUS_SEC_TKIP_ENC                 = (3 << 8),
        RX_MPDU_RES_STATUS_SEC_EXT_ENC                  = (4 << 8),
-       RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC             = (6 << 8),
+       RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC            = (6 << 8),
        RX_MPDU_RES_STATUS_SEC_ENC_ERR                  = (7 << 8),
        RX_MPDU_RES_STATUS_SEC_ENC_MSK                  = (7 << 8),
        RX_MPDU_RES_STATUS_DEC_DONE                     = BIT(11),
@@ -239,6 +240,8 @@ enum iwl_rx_mpdu_status {
        IWL_RX_MPDU_STATUS_ICV_OK               = BIT(5),
        IWL_RX_MPDU_STATUS_MIC_OK               = BIT(6),
        IWL_RX_MPDU_RES_STATUS_TTAK_OK          = BIT(7),
+       /* overlayed since IWL_UCODE_TLV_API_DEPRECATE_TTAK */
+       IWL_RX_MPDU_STATUS_REPLAY_ERROR         = BIT(7),
        IWL_RX_MPDU_STATUS_SEC_MASK             = 0x7 << 8,
        IWL_RX_MPDU_STATUS_SEC_UNKNOWN          = IWL_RX_MPDU_STATUS_SEC_MASK,
        IWL_RX_MPDU_STATUS_SEC_NONE             = 0x0 << 8,
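
Because BIT(7) is now overloaded, a consumer has to check the firmware API
before interpreting it as a replay error. A minimal sketch, assuming the usual
fw_has_api() helper:

    /* sketch: only trust the overlaid meaning on new firmware */
    if (fw_has_api(&mvm->fw->ucode_capa,
                   IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
        (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))
            return false;       /* drop: replay detected */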
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index b2d8ccf..95038b1 100644
@@ -238,7 +238,7 @@ struct iwl_tx_cmd {
        __le16 pm_frame_timeout;
        __le16 reserved4;
        u8 payload[0];
-       struct ieee80211_hdr hdr[];
+       struct ieee80211_hdr hdr[0];
 } __packed; /* TX_CMD_API_S_VER_6 */
 
 struct iwl_dram_sec_info {
@@ -855,6 +855,32 @@ struct iwl_tx_path_flush_cmd {
        __le16 reserved;
 } __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
 
+#define IWL_TX_FLUSH_QUEUE_RSP 16
+
+/**
+ * struct iwl_flush_queue_info - virtual flush queue info
+ * @tid: the TID that was flushed
+ * @queue_num: virtual queue id
+ * @read_before_flush: read pointer before flush
+ * @read_after_flush: read pointer after flush
+ */
+struct iwl_flush_queue_info {
+       __le16 tid;
+       __le16 queue_num;
+       __le16 read_before_flush;
+       __le16 read_after_flush;
+} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd_rsp - queue/FIFO flush command response
+ * @sta_id: the station the queues were flushed for
+ * @num_flushed_queues: number of queues in the queues array
+ * @queues: all flushed queues
+ */
+struct iwl_tx_path_flush_cmd_rsp {
+       __le16 sta_id;
+       __le16 num_flushed_queues;
+       struct iwl_flush_queue_info queues[IWL_TX_FLUSH_QUEUE_RSP];
+} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
+
 /* Available options for the SCD_QUEUE_CFG HCMD */
 enum iwl_scd_cfg_actions {
        SCD_CFG_DISABLE_QUEUE           = 0x0,
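
A hedged sketch of walking the new response inside a notification handler
(variable names are illustrative; the real consumer is in mvm/tx.c, outside
this excerpt):

    struct iwl_rx_packet *pkt = rxb_addr(rxb);
    struct iwl_tx_path_flush_cmd_rsp *rsp = (void *)pkt->data;
    int i, num_flushed = le16_to_cpu(rsp->num_flushed_queues);

    if (num_flushed > IWL_TX_FLUSH_QUEUE_RSP)   /* don't trust fw blindly */
            num_flushed = IWL_TX_FLUSH_QUEUE_RSP;

    for (i = 0; i < num_flushed; i++) {
            struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
            int read_after = le16_to_cpu(queue_info->read_after_flush);

            /* reclaim/free frames up to the post-flush read pointer */
    }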
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 0f0a672..1fdf80f 100644
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
@@ -1157,10 +1157,7 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
 static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
                                     void *range_ptr, int idx)
 {
-       /* increase idx by 1 since the pages are from 1 to
-        * fwrt->num_of_paging_blk + 1
-        */
-       struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block;
+       struct page *page = fwrt->fw_paging_db[idx].fw_paging_block;
        struct iwl_fw_ini_error_dump_range *range = range_ptr;
        dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
        u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
@@ -1183,6 +1180,9 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
        struct iwl_fw_ini_error_dump_range *range;
        u32 page_size;
 
+       /* all paging indices start from 1 to skip the CSS section */
+       idx++;
+
        if (!fwrt->trans->trans_cfg->gen2)
                return _iwl_dump_ini_paging_iter(fwrt, range_ptr, idx);
 
@@ -1684,8 +1684,12 @@ static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
 static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt,
                                      struct iwl_dump_ini_region_data *reg_data)
 {
-       if (fwrt->trans->trans_cfg->gen2)
-               return fwrt->trans->init_dram.paging_cnt;
+       if (fwrt->trans->trans_cfg->gen2) {
+               if (fwrt->trans->init_dram.paging_cnt)
+                       return fwrt->trans->init_dram.paging_cnt - 1;
+               else
+                       return 0;
+       }
 
        return fwrt->num_of_paging_blk;
 }
@@ -1750,15 +1754,13 @@ iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
        u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range);
        u32 size = sizeof(struct iwl_fw_ini_error_dump);
 
-       if (fwrt->trans->trans_cfg->gen2) {
-               for (i = 0; i < iwl_dump_ini_paging_ranges(fwrt, reg_data); i++)
-                       size += range_header_len +
-                               fwrt->trans->init_dram.paging[i].size;
-       } else {
-               for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data);
-                    i++)
-                       size += range_header_len +
-                               fwrt->fw_paging_db[i].fw_paging_size;
+       /* start from 1 to skip CSS section */
+       for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg_data); i++) {
+               size += range_header_len;
+               if (fwrt->trans->trans_cfg->gen2)
+                       size += fwrt->trans->init_dram.paging[i].size;
+               else
+                       size += fwrt->fw_paging_db[i].fw_paging_size;
        }
 
        return size;
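
The paging refactor hinges on the layout assumption spelled out in the
comments: fw_paging_db[0] holds the CSS section and is never dumped, so the
real blocks live at indices 1..num_of_paging_blk. Illustration only:

    /* index 0 is the CSS section -- skip it when dumping */
    for (i = 1; i <= fwrt->num_of_paging_blk; i++)
            dump_one_block(&fwrt->fw_paging_db[i]);  /* hypothetical helper */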
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 597bc88..e706881 100644
@@ -93,6 +93,7 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_FW_RECOVERY_INFO  = 57,
        IWL_UCODE_TLV_HW_TYPE                   = 58,
        IWL_UCODE_TLV_FW_FSEQ_VERSION           = 60,
+       IWL_UCODE_TLV_PHY_INTEGRATION_VERSION   = 61,
 
        IWL_UCODE_TLV_PNVM_VERSION              = 62,
        IWL_UCODE_TLV_PNVM_SKU                  = 64,
@@ -439,6 +440,8 @@ enum iwl_ucode_tlv_capa {
         */
        IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT             = (__force iwl_ucode_tlv_capa_t)98,
 
+       IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT                = (__force iwl_ucode_tlv_capa_t)100,
+
        NUM_IWL_UCODE_TLV_CAPA
 #ifdef __CHECKER__
                /* sparse says it cannot increment the previous enum member */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index c93d247..1dee471 100644
@@ -219,6 +219,9 @@ struct iwl_fw {
        u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
 
        struct iwl_fw_dbg dbg;
+
+       u8 *phy_integration_ver;
+       u32 phy_integration_ver_len;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index e317b05..986913f 100644
@@ -36,11 +36,13 @@ IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
 void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
 {
        iwl_fw_suspend_timestamp(fwrt);
+       iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
 
 void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
 {
+       iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END, NULL);
        iwl_fw_resume_timestamp(fwrt);
 }
 IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 86e1d57..41d74a8 100644
@@ -418,6 +418,7 @@ struct iwl_cfg {
 #define IWL_CFG_MAC_TYPE_QU            0x33
 #define IWL_CFG_MAC_TYPE_QUZ           0x35
 #define IWL_CFG_MAC_TYPE_QNJ           0x36
+#define IWL_CFG_MAC_TYPE_SO            0x37
 #define IWL_CFG_MAC_TYPE_SNJ           0x42
 #define IWL_CFG_MAC_TYPE_MA            0x44
 
@@ -473,6 +474,9 @@ extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_snj_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg;
 extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
 extern const char iwl9162_name[];
 extern const char iwl9260_name[];
@@ -600,7 +604,8 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
 extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
 extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
 extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
-extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_snj_a0_jf_b0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
 extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
 extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d44bc61..263c3c0 100644
@@ -127,6 +127,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
        kfree(drv->fw.dbg.mem_tlv);
        kfree(drv->fw.iml);
        kfree(drv->fw.ucode_capa.cmd_versions);
+       kfree(drv->fw.phy_integration_ver);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -1143,6 +1144,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        capa->n_cmd_versions =
                                tlv_len / sizeof(struct iwl_fw_cmd_version);
                        break;
+               case IWL_UCODE_TLV_PHY_INTEGRATION_VERSION:
+                       if (drv->fw.phy_integration_ver) {
+                               IWL_ERR(drv,
+                                       "phy integration str ignored, already exists\n");
+                               break;
+                       }
+
+                       drv->fw.phy_integration_ver =
+                               kmemdup(tlv_data, tlv_len, GFP_KERNEL);
+                       if (!drv->fw.phy_integration_ver)
+                               return -ENOMEM;
+                       drv->fw.phy_integration_ver_len = tlv_len;
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c210627..43a04bc 100644
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
  * Copyright (C) 2015 Intel Mobile Communications GmbH
  */
 #include <linux/types.h>
@@ -711,9 +711,8 @@ void iwl_init_ht_hw_capab(struct iwl_trans *trans,
        if (cfg->ht_params->ldpc)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
 
-       if ((trans->trans_cfg->mq_rx_supported &&
-            iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
-            iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+       if (trans->trans_cfg->mq_rx_supported ||
+           iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
                ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
 
        ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 2b7ef15..7819a70 100644
@@ -446,3 +446,39 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
        return err < 0 ? err : 0;
 }
 IWL_EXPORT_SYMBOL(iwl_finish_nic_init);
+
+void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
+                                 u32 sw_err_bit)
+{
+       unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+       bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
+
+       /* if the interrupts were already disabled, there is no point in
+        * calling iwl_disable_interrupts
+        */
+       if (interrupts_enabled)
+               iwl_trans_interrupts(trans, false);
+
+       iwl_force_nmi(trans);
+       while (time_after(timeout, jiffies)) {
+               u32 inta_hw = iwl_read32(trans, inta_addr);
+
+               /* Error detected by uCode */
+               if (inta_hw & sw_err_bit) {
+                       /* Clear causes register */
+                       iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
+                       break;
+               }
+
+               mdelay(1);
+       }
+
+       /* re-enable interrupts only if they were already enabled before this
+        * function, to avoid a case where the driver enables interrupts before
+        * the proper configuration has been made
+        */
+       if (interrupts_enabled)
+               iwl_trans_interrupts(trans, true);
+
+       iwl_trans_fw_error(trans);
+}
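
A sketch of how a transport's .sync_nmi op might wrap the new helper; the PCIe
change doing this is in the omitted part of the diff, so the register
selection below is an assumption based on the usual CSR/MSI-X split:

static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
        u32 inta_addr, sw_err_bit;

        if (msix_enabled) {             /* per-transport flag, illustrative */
                inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
                sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
        } else {
                inta_addr = CSR_INT;
                sw_err_bit = CSR_INT_BIT_SW_ERR;
        }

        iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}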
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 1158e25..3ce77e4 100644
@@ -365,6 +365,7 @@ enum {
 /* device family 22000 WPROT register */
 #define PREG_PRPH_WPROT_22000          0xA04D00
 
+#define SB_MODIFY_CFG_FLAG             0xA03088
 #define SB_CPU_1_STATUS                        0xA01E30
 #define SB_CPU_2_STATUS                        0xA01E34
 #define UMAG_SB_CPU_1_STATUS           0xA038C0
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index cc76826..f098545 100644
@@ -102,6 +102,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
                return NULL;
        }
 
+       /* Initialize the wait queue for commands */
+       init_waitqueue_head(&trans->wait_command_queue);
+
        return trans;
 }
 
@@ -130,6 +133,19 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
                     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
                return -ERFKILL;
 
+       /*
+        * We can't test IWL_MVM_STATUS_IN_D3 in mvm->status because this
+        * bit is set early in the D3 flow, before we send all the commands
+        * that configure the firmware for D3 operation (power, patterns, ...)
+        * and we don't want to flag all those with CMD_SEND_IN_D3.
+        * So use the system_pm_mode instead. The only command sent after
+        * we set system_pm_mode is D3_CONFIG_CMD, which we now flag with
+        * CMD_SEND_IN_D3.
+        */
+       if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+                    !(cmd->flags & CMD_SEND_IN_D3)))
+               return -EHOSTDOWN;
+
        if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
                return -EIO;
 
@@ -148,7 +164,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
        if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
                cmd->id = DEF_ID(cmd->id);
 
-       ret = trans->ops->send_cmd(trans, cmd);
+       ret = iwl_trans_txq_send_hcmd(trans, cmd);
 
        if (!(cmd->flags & CMD_ASYNC))
                lock_map_release(&trans->sync_cmd_lockdep_map);
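
For illustration, the only commands expected through this gate once
system_pm_mode is set are those explicitly flagged, as the mvm/d3.c change
further down in this diff does:

    struct iwl_host_cmd d3_cfg_cmd = {
            .id = D3_CONFIG_CMD,
            .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
            ...
    };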
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 2d65bb8..3348d58 100644
@@ -107,12 +107,16 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  *     the response. The caller needs to call iwl_free_resp when done.
  * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
  *     called after this command completes. Valid only with CMD_ASYNC.
+ * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
+ *     SUSPEND and RESUME commands. We are in D3 mode when we set
+ *     trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
  */
 enum CMD_MODE {
        CMD_ASYNC               = BIT(0),
        CMD_WANT_SKB            = BIT(1),
        CMD_SEND_IN_RFKILL      = BIT(2),
        CMD_WANT_ASYNC_CALLBACK = BIT(3),
+       CMD_SEND_IN_D3          = BIT(4),
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -514,6 +518,7 @@ struct iwl_trans_rxq_dma_data {
  *     of the trans debugfs
  * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
  *     context info.
+ * @interrupts: disable/enable interrupts to transport
  */
 struct iwl_trans_ops {
 
@@ -579,14 +584,13 @@ struct iwl_trans_ops {
                                   unsigned long *flags);
        void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
                              u32 value);
-       int  (*suspend)(struct iwl_trans *trans);
-       void (*resume)(struct iwl_trans *trans);
 
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
                                                 u32 dump_mask);
        void (*debugfs_cleanup)(struct iwl_trans *trans);
        void (*sync_nmi)(struct iwl_trans *trans);
        int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
+       void (*interrupts)(struct iwl_trans *trans, bool enable);
 };
 
 /**
@@ -914,6 +918,7 @@ struct iwl_trans_txqs {
  * @pm_support: set to true in start_hw if link pm is supported
  * @ltr_enabled: set to true if the LTR is enabled
  * @wide_cmd_header: true when ucode supports wide command header format
+ * @wait_command_queue: wait queue for sync commands
  * @num_rx_queues: number of RX queues allocated by the transport;
  *     the transport must set this before calling iwl_drv_start()
  * @iml_len: the length of the image loader
@@ -957,6 +962,7 @@ struct iwl_trans {
        int command_groups_size;
        bool wide_cmd_header;
 
+       wait_queue_head_t wait_command_queue;
        u8 num_rx_queues;
 
        size_t iml_len;
@@ -1073,20 +1079,6 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
        return trans->ops->d3_resume(trans, status, test, reset);
 }
 
-static inline int iwl_trans_suspend(struct iwl_trans *trans)
-{
-       if (!trans->ops->suspend)
-               return 0;
-
-       return trans->ops->suspend(trans);
-}
-
-static inline void iwl_trans_resume(struct iwl_trans *trans)
-{
-       if (trans->ops->resume)
-               trans->ops->resume(trans);
-}
-
 static inline struct iwl_trans_dump_data *
 iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
 {
@@ -1409,6 +1401,9 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
                trans->ops->sync_nmi(trans);
 }
 
+void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
+                                 u32 sw_err_bit);
+
 static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
                                     const void *data, u32 len)
 {
@@ -1430,6 +1425,12 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
                trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
 }
 
+static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
+{
+       if (trans->ops->interrupts)
+               trans->ops->interrupts(trans, enable);
+}
+
 /*****************************************************
  * transport helper functions
  *****************************************************/
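
The new op follows the same optional-op pattern as sync_nmi above; a sketch of
the transport side (the PCIe wiring is in the omitted portion of this diff, so
the names here are assumptions):

static void iwl_trans_pcie_interrupts(struct iwl_trans *trans, bool enable)
{
        if (enable)
                iwl_enable_interrupts(trans);
        else
                iwl_disable_interrupts(trans);
}

/* ... and in the transport's iwl_trans_ops: */
        .interrupts = iwl_trans_pcie_interrupts,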
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 64c10ca..acb3062 100644
@@ -975,7 +975,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        };
        struct iwl_host_cmd d3_cfg_cmd = {
                .id = D3_CONFIG_CMD,
-               .flags = CMD_WANT_SKB,
+               .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
                .data[0] = &d3_cfg_cmd_data,
                .len[0] = sizeof(d3_cfg_cmd_data),
        };
@@ -997,6 +997,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
 
+       synchronize_net();
+
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif)) {
                ret = 1;
@@ -1065,6 +1067,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
                iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
 
+       mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
        /* must be last -- this switches firmware state */
        ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
        if (ret)
@@ -1103,19 +1107,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct iwl_trans *trans = mvm->trans;
-       int ret;
 
        iwl_mvm_pause_tcm(mvm, true);
 
        iwl_fw_runtime_suspend(&mvm->fwrt);
 
-       ret = iwl_trans_suspend(trans);
-       if (ret)
-               return ret;
-
-       trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
        return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
@@ -2050,9 +2046,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto err;
        }
 
-       iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_END,
-                              NULL);
-
        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
        if (ret)
                goto err;
@@ -2065,7 +2058,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        if (d0i3_first) {
                struct iwl_host_cmd cmd = {
                        .id = D0I3_END_CMD,
-                       .flags = CMD_WANT_SKB,
+                       .flags = CMD_WANT_SKB | CMD_SEND_IN_D3,
                };
                int len;
 
@@ -2098,6 +2091,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                }
        }
 
+       mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
        /*
         * Query the current location and source from the D3 firmware so we
         * can play it back when we re-initialize the D0 firmware
@@ -2171,8 +2166,6 @@ out:
 
 static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
 {
-       iwl_trans_resume(mvm->trans);
-
        return __iwl_mvm_resume(mvm, false);
 }
 
@@ -2183,8 +2176,6 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
 
        ret = iwl_mvm_resume_d3(mvm);
 
-       mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
        iwl_mvm_resume_tcm(mvm);
 
        iwl_fw_runtime_resume(&mvm->fwrt);
@@ -2210,10 +2201,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
 
        file->private_data = inode->i_private;
 
-       synchronize_net();
-
-       mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
-
        iwl_mvm_pause_tcm(mvm, true);
 
        iwl_fw_runtime_suspend(&mvm->fwrt);
@@ -2283,8 +2270,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 
        iwl_fw_runtime_resume(&mvm->fwrt);
 
-       mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
-
        iwl_abort_notification_waits(&mvm->notif_wait);
        if (!unified_image) {
                int remaining_time = 10;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 80f848a..efc9082 100644
@@ -91,7 +91,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                                    "FLUSHING all tids queues on sta_id = %d\n",
                                    flush_arg);
                mutex_lock(&mvm->mutex);
-               ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF, 0)
+               ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFFFF)
                        ? : count;
                mutex_unlock(&mvm->mutex);
                return ret;
@@ -101,7 +101,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
                            flush_arg);
 
        mutex_lock(&mvm->mutex);
-       ret =  iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ? : count;
+       ret =  iwl_mvm_flush_tx_path(mvm, flush_arg) ? : count;
        mutex_unlock(&mvm->mutex);
 
        return ret;
@@ -712,6 +712,30 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
        return ret;
 }
 
+static ssize_t iwl_dbgfs_phy_integration_ver_read(struct file *file,
+                                                 char __user *user_buf,
+                                                 size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char *buf;
+       size_t bufsz;
+       int pos;
+       ssize_t ret;
+
+       bufsz = mvm->fw->phy_integration_ver_len + 2;
+       buf = kmalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos = scnprintf(buf, bufsz, "%.*s\n", mvm->fw->phy_integration_ver_len,
+                       mvm->fw->phy_integration_ver);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+       kfree(buf);
+       return ret;
+}
+
 #define PRINT_STATS_LE32(_struct, _memb)                               \
                         pos += scnprintf(buf + pos, bufsz - pos,       \
                                          fmt_table, #_memb,            \
@@ -1117,24 +1141,22 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
                                             char *buf, size_t count,
                                             loff_t *ppos)
 {
+       struct iwl_op_mode *opmode = container_of((void *)mvm,
+                                                 struct iwl_op_mode,
+                                                 op_mode_specific);
        struct iwl_rx_cmd_buffer rxb = {
                ._rx_page_order = 0,
                .truesize = 0, /* not used */
                ._offset = 0,
        };
        struct iwl_rx_packet *pkt;
-       struct iwl_rx_mpdu_desc *desc;
        int bin_len = count / 2;
        int ret = -EINVAL;
-       size_t mpdu_cmd_hdr_size = (mvm->trans->trans_cfg->device_family >=
-                                   IWL_DEVICE_FAMILY_AX210) ?
-               sizeof(struct iwl_rx_mpdu_desc) :
-               IWL_RX_DESC_SIZE_V1;
 
        if (!iwl_mvm_firmware_running(mvm))
                return -EIO;
 
-       /* supporting only 9000 descriptor */
+       /* supporting only MQ RX */
        if (!mvm->trans->trans_cfg->mq_rx_supported)
                return -ENOTSUPP;
 
@@ -1147,23 +1169,13 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
        if (ret)
                goto out;
 
-       /* avoid invalid memory access */
-       if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
-               goto out;
-
-       /* check this is RX packet */
-       if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
-           WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
-               goto out;
-
-       /* check the length in metadata matches actual received length */
-       desc = (void *)pkt->data;
-       if (le16_to_cpu(desc->mpdu_len) !=
-           (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
+       /* avoid invalid memory access and malformed packet */
+       if (bin_len < sizeof(*pkt) ||
+           bin_len != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt))
                goto out;
 
        local_bh_disable();
-       iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+       iwl_mvm_rx_mq(opmode, NULL, &rxb);
        local_bh_enable();
        ret = 0;
 
@@ -1337,6 +1349,24 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        return count;
 }
 
+static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
+                                             char *buf, size_t count,
+                                             loff_t *ppos)
+{
+       u32 timepoint;
+
+       if (kstrtou32(buf, 0, &timepoint))
+               return -EINVAL;
+
+       if (timepoint == IWL_FW_INI_TIME_POINT_INVALID ||
+           timepoint >= IWL_FW_INI_TIME_POINT_NUM)
+               return -EINVAL;
+
+       iwl_dbg_tlv_time_point(&mvm->fwrt, timepoint, NULL);
+
+       return count;
+}
+
 #define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1766,6 +1796,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
 MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
 MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
 MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
+MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
 MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
@@ -1773,6 +1804,7 @@ MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(dbg_time_point, 64);
 MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
                           (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
 MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
@@ -1978,6 +2010,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
        MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200);
        MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200);
+
+       if (mvm->fw->phy_integration_ver)
+               MVM_DEBUGFS_ADD_FILE(phy_integration_ver, mvm->debugfs_dir, 0400);
 #ifdef CONFIG_ACPI
        MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
 #endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 313e9f1..6a6567a 100644
@@ -6,6 +6,7 @@
  */
 #include <net/mac80211.h>
 #include <linux/netdevice.h>
+#include <linux/dmi.h>
 
 #include "iwl-trans.h"
 #include "iwl-op-mode.h"
@@ -633,6 +634,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
                                   iwl_wait_phy_db_entry,
                                   mvm->phy_db);
 
+       iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
+
        /* Will also start the device */
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
        if (ret) {
@@ -997,6 +1000,8 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
                return 0;
        }
 
+       ppag_table.v1.enabled = mvm->fwrt.ppag_table.v1.enabled;
+
        cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
                                        PER_PLATFORM_ANT_GAIN_CMD,
                                        IWL_FW_CMD_VER_UNKNOWN);
@@ -1039,6 +1044,29 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
        return ret;
 }
 
+static const struct dmi_system_id dmi_ppag_approved_list[] = {
+       { .ident = "HP",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+               },
+       },
+       { .ident = "SAMSUNG",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
+               },
+       },
+       { .ident = "MSFT",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+               },
+       },
+       { .ident = "ASUS",
+         .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+               },
+       },
+       /* keep last -- dmi_check_system() needs a terminating empty entry */
+       {},
+};
+
 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
 {
        int ret;
@@ -1050,6 +1078,15 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
                                ret);
                return 0;
        }
+
+       if (!dmi_check_system(dmi_ppag_approved_list)) {
+               IWL_DEBUG_RADIO(mvm,
+                               "System vendor '%s' is not in the approved list, disabling PPAG.\n",
+                               dmi_get_system_info(DMI_SYS_VENDOR));
+               mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
+               return 0;
+       }
+
        return iwl_mvm_ppag_send_cmd(mvm);
 }
 
@@ -1315,8 +1352,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
-
        mvm->rfkill_safe_init_done = false;
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
        if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 9078fcb..fd5e089 100644
@@ -1289,6 +1289,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
        struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
        struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data;
        struct ieee80211_vif *csa_vif;
@@ -1304,6 +1305,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                struct iwl_mvm_tx_resp *beacon_notify_hdr =
                        &beacon_v5->beacon_notify_hdr;
 
+               if (unlikely(pkt_len < sizeof(*beacon_v5)))
+                       return;
+
                mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0;
                agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
                status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
@@ -1314,6 +1318,9 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                             mvm->ap_last_beacon_gp2,
                             le32_to_cpu(beacon_notify_hdr->initial_rate));
        } else {
+               if (unlikely(pkt_len < sizeof(*beacon)))
+                       return;
+
                mvm->ibss_manager = beacon->ibss_mgr_status != 0;
                status = le32_to_cpu(beacon->status) & TX_STATUS_MSK;
                IWL_DEBUG_RX(mvm,
@@ -1419,12 +1426,13 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
                                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
        struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb;
        u32 size = le32_to_cpu(sb->byte_count);
 
-       if (size == 0)
+       if (size == 0 || pkt_len < struct_size(sb, data, size))
                return;
 
        skb = alloc_skb(size, GFP_ATOMIC);
@@ -1460,14 +1468,10 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_probe_resp_data_notif *notif = (void *)pkt->data;
        struct iwl_probe_resp_data *old_data, *new_data;
-       int len = iwl_rx_packet_payload_len(pkt);
        u32 id = le32_to_cpu(notif->mac_id);
        struct ieee80211_vif *vif;
        struct iwl_mvm_vif *mvmvif;
 
-       if (WARN_ON_ONCE(len < sizeof(*notif)))
-               return;
-
        IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n",
                       notif->noa_active, notif->csa_counter);
 
@@ -1514,12 +1518,8 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
        struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
        struct ieee80211_vif *csa_vif, *vif;
        struct iwl_mvm_vif *mvmvif;
-       int len = iwl_rx_packet_payload_len(pkt);
        u32 id_n_color, csa_id, mac_id;
 
-       if (WARN_ON_ONCE(len < sizeof(*notif)))
-               return;
-
        id_n_color = le32_to_cpu(notif->id_and_color);
        mac_id = id_n_color & FW_CTXT_ID_MSK;
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index bcbd77e..aecf91d 100644
@@ -472,6 +472,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa;
        }
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT))
+               wiphy_ext_feature_set(hw->wiphy,
+                                     NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT);
+
        ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
        hw->wiphy->features |=
                NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
@@ -816,8 +821,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
        rcu_read_lock();
        do {
                while (likely(!mvmtxq->stopped &&
-                             (mvm->trans->system_pm_mode ==
-                              IWL_PLAT_PM_MODE_DISABLED))) {
+                             !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
                        skb = ieee80211_tx_dequeue(hw, txq);
 
                        if (!skb) {
@@ -1368,15 +1372,13 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
 
 static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
 {
-       struct iwl_mvm *mvm;
        struct iwl_mvm_vif *mvmvif;
        struct ieee80211_vif *vif;
 
        mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
        vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
-       mvm = mvmvif->mvm;
 
-       iwl_mvm_abort_channel_switch(mvm->hw, vif);
+       /* Trigger disconnect (should clear the CSA state) */
        ieee80211_chswitch_done(vif, false);
 }
 
@@ -2404,12 +2406,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                        IWL_ERR(mvm, "failed to update power mode\n");
        }
 
-       if (changes & BSS_CHANGED_TXPOWER) {
-               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
-                               bss_conf->txpower);
-               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
-       }
-
        if (changes & BSS_CHANGED_CQM) {
                IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
                /* reset cqm events tracking */
@@ -2641,12 +2637,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
            iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
                IWL_WARN(mvm, "Failed updating beacon data\n");
 
-       if (changes & BSS_CHANGED_TXPOWER) {
-               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
-                               bss_conf->txpower);
-               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
-       }
-
        if (changes & BSS_CHANGED_FTM_RESPONDER) {
                int ret = iwl_mvm_ftm_start_responder(mvm, vif);
 
@@ -2686,6 +2676,12 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
                WARN_ON_ONCE(1);
        }
 
+       if (changes & BSS_CHANGED_TXPOWER) {
+               IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n",
+                               bss_conf->txpower);
+               iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+       }
+
        mutex_unlock(&mvm->mutex);
 }
 
@@ -3009,6 +3005,39 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
        mvmvif->he_ru_2mhz_block = !iter_data.tolerated;
 }
 
+static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm,
+                                              struct ieee80211_vif *vif)
+{
+       struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *he_cap;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (!mvm->cca_40mhz_workaround)
+               return;
+
+       /* decrement and check that we reached zero */
+       mvm->cca_40mhz_workaround--;
+       if (mvm->cca_40mhz_workaround)
+               return;
+
+       sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+       sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(vif));
+
+       if (he_cap) {
+               /* we know that ours is writable */
+               struct ieee80211_sta_he_cap *he = (void *)he_cap;
+
+               he->he_cap_elem.phy_cap_info[0] |=
+                       IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+       }
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif,
                                 struct ieee80211_sta *sta,
@@ -3048,6 +3077,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                 * No need to make sure deferred TX indication is off since the
                 * worker will already remove it if it was on
                 */
+
+               /*
+                * Additionally, reset the 40 MHz capability if we disconnected
+                * from the AP now.
+                */
+               iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
        }
 
        mutex_lock(&mvm->mutex);
@@ -3389,6 +3424,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 
        switch (cmd) {
        case SET_KEY:
+               if (keyidx == 6 || keyidx == 7)
+                       rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6],
+                                          key);
+
                if ((vif->type == NL80211_IFTYPE_ADHOC ||
                     vif->type == NL80211_IFTYPE_AP) && !sta) {
                        /*
@@ -3497,6 +3536,10 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 
                break;
        case DISABLE_KEY:
+               if (keyidx == 6 || keyidx == 7)
+                       RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6],
+                                        NULL);
+
                ret = -ENOENT;
                for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) {
                        if (mvmvif->ap_early_keys[i] == key) {
@@ -4648,7 +4691,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
                if (drop) {
                        mutex_lock(&mvm->mutex);
                        iwl_mvm_flush_tx_path(mvm,
-                               iwl_mvm_flushable_queues(mvm) & queues, 0);
+                               iwl_mvm_flushable_queues(mvm) & queues);
                        mutex_unlock(&mvm->mutex);
                } else {
                        iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
@@ -4666,7 +4709,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
                        continue;
 
                if (drop)
-                       iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF, 0);
+                       iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF);
                else
                        iwl_mvm_wait_sta_queues_empty(mvm,
                                        iwl_mvm_sta_from_mac80211(sta));
@@ -4948,6 +4991,34 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
+                                           struct ieee80211_vif *vif,
+                                           const  struct ieee80211_mlme_event *mlme)
+{
+       if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED ||
+                                         mlme->status == MLME_TIMEOUT)) {
+               iwl_dbg_tlv_time_point(&mvm->fwrt,
+                                      IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
+                                      NULL);
+               return;
+       }
+
+       if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED ||
+                                        mlme->status == MLME_TIMEOUT)) {
+               iwl_dbg_tlv_time_point(&mvm->fwrt,
+                                      IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
+                                      NULL);
+               return;
+       }
+
+       if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
+               iwl_dbg_tlv_time_point(&mvm->fwrt,
+                                      IWL_FW_INI_TIME_POINT_DEASSOC,
+                                      NULL);
+               return;
+       }
+}
+
 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif,
                                        const struct ieee80211_event *event)
@@ -4962,6 +5033,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
+       if (iwl_trans_dbg_ini_valid(mvm->trans)) {
+               iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme);
+               return;
+       }
+
        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
                                     FW_DBG_TRIGGER_MLME);
        if (!trig)
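
The keys stashed at indices 6 and 7 above (the standard beacon-protection key
IDs) are presumably consumed on the RX path for beacon MIC checks; that
consumer is outside this excerpt, so the lookup below is a sketch under the
usual RCU rules:

    struct ieee80211_key_conf *key;

    rcu_read_lock();
    key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
    if (key) {
            /* verify the beacon's MME MIC against this key */
    }
    rcu_read_unlock();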
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ed0e8b7..308ba2e 100644
@@ -419,6 +419,10 @@ struct iwl_mvm_vif {
 
        /* 26-tone RU OFDMA transmissions should be blocked */
        bool he_ru_2mhz_block;
+
+       struct {
+               struct ieee80211_key_conf __rcu *keys[2];
+       } bcn_prot;
 };
 
 static inline struct iwl_mvm_vif *
@@ -796,6 +800,8 @@ struct iwl_mvm {
        bool hw_registered;
        bool rfkill_safe_init_done;
 
+       u8 cca_40mhz_workaround;
+
        u32 ampdu_ref;
        bool ampdu_toggle;
 
@@ -1471,10 +1477,9 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk);
 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
-int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
-                          u16 tids, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids);
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
@@ -1547,6 +1552,9 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+                  struct napi_struct *napi,
+                  struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb);
@@ -1898,7 +1906,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
-void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
 void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
                        struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
@@ -1995,6 +2002,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
                                     u32 size);
 void iwl_mvm_reorder_timer_expired(struct timer_list *t);
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid);
 bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
 
 #define MVM_TCM_PERIOD_MSEC 500
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 61618f6..16357c5 100644
@@ -146,6 +146,70 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
                                       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
 }
 
+static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_datapath_monitor_notif *notif = (void *)pkt->data;
+       struct ieee80211_supported_band *sband;
+       const struct ieee80211_sta_he_cap *he_cap;
+       struct ieee80211_vif *vif;
+
+       if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA))
+               return;
+
+       vif = iwl_mvm_get_vif_by_macid(mvm, notif->mac_id);
+       if (!vif || vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (!vif->bss_conf.chandef.chan ||
+           vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
+           vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+               return;
+
+       if (!vif->bss_conf.assoc)
+               return;
+
+       /* this shouldn't happen *again*, ignore it */
+       if (mvm->cca_40mhz_workaround)
+               return;
+
+       /*
+        * We'll decrement this on disconnect - so set to 2 since we'll
+        * still have to disconnect from the current AP first.
+        */
+       mvm->cca_40mhz_workaround = 2;
+
+       /*
+        * This capability manipulation isn't really ideal, but it's the
+        * easiest choice - otherwise we'd have to do some major changes
+        * in mac80211 to support this, which isn't worth it. This does
+        * mean that userspace may have outdated information, but that's
+        * actually not an issue at all.
+        */
+       sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ];
+
+       WARN_ON(!sband->ht_cap.ht_supported);
+       WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40));
+       sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+       he_cap = ieee80211_get_he_iftype_cap(sband,
+                                            ieee80211_vif_type_p2p(vif));
+
+       if (he_cap) {
+               /* we know that ours is writable */
+               struct ieee80211_sta_he_cap *he = (void *)he_cap;
+
+               WARN_ON(!he->has_he);
+               WARN_ON(!(he->he_cap_elem.phy_cap_info[0] &
+                               IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G));
+               he->he_cap_elem.phy_cap_info[0] &=
+                       ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+       }
+
+       ieee80211_disconnect(vif, true);
+}
+
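
A note on the counter handling above: the workaround is a one-shot latch, armed once per occurrence and consumed by the disconnects it causes. A minimal, self-contained sketch of that state machine, with invented names (wlan_state, arm_cca_workaround and on_disconnect are not driver functions):

#include <stdbool.h>

struct wlan_state {
        /* 0 = off; counts down once per disconnect while armed */
        unsigned char cca_40mhz_workaround;
};

static bool arm_cca_workaround(struct wlan_state *s)
{
        /* already armed: this shouldn't happen *again*, ignore it */
        if (s->cca_40mhz_workaround)
                return false;
        /* set to 2: the forced disconnect from the current AP is
         * still pending and must not disarm the workaround */
        s->cca_40mhz_workaround = 2;
        return true;
}

static void on_disconnect(struct wlan_state *s)
{
        if (s->cca_40mhz_workaround)
                s->cca_40mhz_workaround--;
        /* once it reaches 0, the 40 MHz capabilities may be restored */
}
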
 /**
  * enum iwl_rx_handler_context context for Rx handler
  * @RX_HANDLER_SYNC : this means that it will be called in the Rx path
@@ -169,15 +233,21 @@ enum iwl_rx_handler_context {
  * @fn: the function is called when notification is received
  */
 struct iwl_rx_handlers {
-       u16 cmd_id;
+       u16 cmd_id, min_size;
        enum iwl_rx_handler_context context;
        void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
-#define RX_HANDLER(_cmd_id, _fn, _context)     \
-       { .cmd_id = _cmd_id, .fn = _fn, .context = _context }
-#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)      \
-       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
+#define RX_HANDLER_NO_SIZE(_cmd_id, _fn, _context)             \
+       { .cmd_id = _cmd_id, .fn = _fn, .context = _context, }
+#define RX_HANDLER_GRP_NO_SIZE(_grp, _cmd, _fn, _context)      \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context, }
+#define RX_HANDLER(_cmd_id, _fn, _context, _struct)            \
+       { .cmd_id = _cmd_id, .fn = _fn,                         \
+         .context = _context, .min_size = sizeof(_struct), }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context, _struct)     \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn,             \
+         .context = _context, .min_size = sizeof(_struct), }
 
 /*
  * Handlers for fw notifications
@@ -187,85 +257,107 @@ struct iwl_rx_handlers {
  * The handler can be one from three contexts, see &iwl_rx_handler_context
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
-       RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+       RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC,
+                  struct iwl_mvm_tx_resp),
+       RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC,
+                  struct iwl_mvm_ba_notif),
 
        RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
-                      iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC,
+                      struct iwl_tlc_update_notif),
 
        RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
-       RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
-       RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
-                  RX_HANDLER_ASYNC_LOCKED),
+                  RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+       RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+                          RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+                          RX_HANDLER_ASYNC_LOCKED),
 
        RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
-                  iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
+                  iwl_mvm_window_status_notif, RX_HANDLER_SYNC,
+                  struct iwl_ba_window_status_notif),
 
        RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
-                  RX_HANDLER_SYNC),
+                  RX_HANDLER_SYNC, struct iwl_time_event_notif),
        RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
-                      iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC,
+                      struct iwl_mvm_session_prot_notif),
        RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
-                  RX_HANDLER_ASYNC_LOCKED),
+                  RX_HANDLER_ASYNC_LOCKED, struct iwl_mcc_chub_notif),
 
-       RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
+       RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC,
+                  struct iwl_mvm_eosp_notification),
 
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
-                  iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+                  iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+                  struct iwl_lmac_scan_complete_notif),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
                   iwl_mvm_rx_lmac_scan_complete_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
-       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
-                  RX_HANDLER_SYNC),
+                  RX_HANDLER_ASYNC_LOCKED, struct iwl_periodic_scan_complete),
+       RX_HANDLER_NO_SIZE(MATCH_FOUND_NOTIFICATION,
+                          iwl_mvm_rx_scan_match_found,
+                          RX_HANDLER_SYNC),
        RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
+                  RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
        RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
-                  iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+                  iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
+                  struct iwl_umac_scan_iter_complete_notif),
 
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
-                  RX_HANDLER_SYNC),
+                  RX_HANDLER_SYNC, struct iwl_card_state_notif),
 
        RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
-                  RX_HANDLER_SYNC),
+                  RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
 
-       RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
+       RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
+                  struct iwl_error_resp),
        RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
-                  iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
-       RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
-       RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
-                      iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
+                  iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC,
+                  struct iwl_uapsd_misbehaving_ap_notif),
+       RX_HANDLER_NO_SIZE(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+                          RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER_GRP_NO_SIZE(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+                              iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
        RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
-                      iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC,
+                      struct ct_kill_notif),
 
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
-                  RX_HANDLER_ASYNC_LOCKED),
+                  RX_HANDLER_ASYNC_LOCKED,
+                  struct iwl_tdls_channel_switch_notif),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
-                  RX_HANDLER_SYNC),
+                  RX_HANDLER_SYNC, struct iwl_mfuart_load_notif_v1),
        RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS,
-                      iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED),
+                      iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED,
+                      struct iwl_ftm_responder_stats),
 
-       RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
-                      iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
-       RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF,
-                      iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF,
+                              iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED),
+       RX_HANDLER_GRP_NO_SIZE(LOCATION_GROUP, TOF_LC_NOTIF,
+                              iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED),
 
        RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
-                      iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC,
+                      struct iwl_mfu_assert_dump_notif),
        RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
-                      iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
+                      struct iwl_stored_beacon_notif),
        RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
-                      iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
+                      struct iwl_mu_group_mgmt_notif),
        RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
-                      iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
+                      iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC,
+                      struct iwl_mvm_pm_state_notification),
        RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
                       iwl_mvm_probe_resp_data_notif,
-                      RX_HANDLER_ASYNC_LOCKED),
+                      RX_HANDLER_ASYNC_LOCKED,
+                      struct iwl_probe_resp_data_notif),
        RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
                       iwl_mvm_channel_switch_noa_notif,
-                      RX_HANDLER_SYNC),
+                      RX_HANDLER_SYNC, struct iwl_channel_switch_noa_notif),
+       RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
+                      iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
+                      struct iwl_datapath_monitor_notif),
 };
 #undef RX_HANDLER
 #undef RX_HANDLER_GRP
@@ -410,6 +502,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
        HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
        HCMD_NAME(TLC_MNG_CONFIG_CMD),
        HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
+       HCMD_NAME(MONITOR_NOTIF),
        HCMD_NAME(STA_PM_NOTIF),
        HCMD_NAME(MU_GROUP_MGMT_NOTIF),
        HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -964,6 +1057,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
                              struct iwl_rx_cmd_buffer *rxb,
                              struct iwl_rx_packet *pkt)
 {
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
        int i;
        union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
 
@@ -985,6 +1079,9 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
                if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                        continue;
 
+               if (unlikely(pkt_len < rx_h->min_size))
+                       return;
+
                if (rx_h->context == RX_HANDLER_SYNC) {
                        rx_h->fn(mvm, rxb);
                        return;
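
The two hunks above are the heart of the robustness change: each handler entry now records the minimum payload it can safely parse, and the common dispatch path drops anything shorter before the handler runs. A self-contained sketch of the same pattern (all names invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct notif_foo { unsigned int a, b; };

struct handler {
        unsigned int cmd_id;
        size_t min_size;
        void (*fn)(const void *payload);
};

#define HANDLER(_id, _fn, _struct) \
        { .cmd_id = (_id), .fn = (_fn), .min_size = sizeof(_struct) }

static void handle_foo(const void *payload)
{
        const struct notif_foo *foo = payload;

        printf("foo: %u/%u\n", foo->a, foo->b);
}

static const struct handler handlers[] = {
        HANDLER(0x42, handle_foo, struct notif_foo),
};

static void dispatch(unsigned int cmd_id, const void *payload, size_t len)
{
        size_t i;

        for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
                if (handlers[i].cmd_id != cmd_id)
                        continue;
                /* short payloads are dropped before the handler can
                 * read past the end of the buffer */
                if (len < handlers[i].min_size)
                        return;
                handlers[i].fn(payload);
                return;
        }
}
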
@@ -1024,9 +1121,9 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
                iwl_mvm_rx_common(mvm, rxb, pkt);
 }
 
-static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
-                         struct napi_struct *napi,
-                         struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+                  struct napi_struct *napi,
+                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
index 490a561..8772b65 100644 (file)
@@ -248,14 +248,13 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
                                 struct iwl_tlc_config_cmd *cmd)
 {
        int i;
-       unsigned long tmp;
-       unsigned long supp; /* must be unsigned long for for_each_set_bit */
+       u16 supp = 0;
+       unsigned long tmp; /* must be unsigned long for for_each_set_bit */
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
        const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
 
        /* non HT rates */
-       supp = 0;
        tmp = sta->supp_rates[sband->band];
        for_each_set_bit(i, &tmp, BITS_PER_LONG)
                supp |= BIT(sband->bitrates[i].hw_value);
index f0364ad..8ef5399 100644 (file)
 void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
+
+       if (unlikely(pkt_len < sizeof(mvm->last_phy_info)))
+               return;
 
        memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
        mvm->ampdu_ref++;
@@ -874,12 +878,11 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
        int i;
-       u32 pkt_len = iwl_rx_packet_payload_len(pkt);
 
-       if (WARN_ONCE(pkt_len != sizeof(*notif),
-                     "Received window status notification of wrong size (%u)\n",
-                     pkt_len))
-               return;
+       BUILD_BUG_ON(ARRAY_SIZE(notif->ra_tid) != BA_WINDOW_STREAMS_MAX);
+       BUILD_BUG_ON(ARRAY_SIZE(notif->mpdu_rx_count) != BA_WINDOW_STREAMS_MAX);
+       BUILD_BUG_ON(ARRAY_SIZE(notif->bitmap) != BA_WINDOW_STREAMS_MAX);
+       BUILD_BUG_ON(ARRAY_SIZE(notif->start_seq_num) != BA_WINDOW_STREAMS_MAX);
 
        rcu_read_lock();
        for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
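
The runtime size WARN could be dropped because the generic min_size check now guarantees the payload is large enough; what remains to pin down is that the notification's arrays agree with the loop bound, which is a compile-time property. BUILD_BUG_ON is the kernel's compile-time assertion; C11's _Static_assert is the portable analogue, sketched here with an invented struct and bound:

#define BA_WINDOW_STREAMS_MAX 16        /* illustrative value */

struct window_status_notif {
        unsigned long long bitmap[BA_WINDOW_STREAMS_MAX];
        unsigned short ra_tid[BA_WINDOW_STREAMS_MAX];
};

/* refuses to compile if the struct and the loop bound ever drift apart */
_Static_assert(sizeof(((struct window_status_notif *)0)->ra_tid) /
               sizeof(unsigned short) == BA_WINDOW_STREAMS_MAX,
               "ra_tid must have one entry per stream");
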
index 4dc7c65..c21736f 100644 (file)
@@ -272,7 +272,72 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
        rx_status->chain_signal[2] = S8_MIN;
 }
 
-static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
+                                 struct ieee80211_hdr *hdr,
+                                 struct iwl_rx_mpdu_desc *desc,
+                                 u32 status)
+{
+       struct iwl_mvm_sta *mvmsta;
+       struct iwl_mvm_vif *mvmvif;
+       u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY);
+       u8 keyid;
+       struct ieee80211_key_conf *key;
+       u32 len = le16_to_cpu(desc->mpdu_len);
+       const u8 *frame = (void *)hdr;
+
+       /*
+        * For non-beacon frames, we don't really care. But beacons may
+        * be filtered out, so we rely on the firmware's replay
+        * detection: otherwise beacons that the firmware previously
+        * filtered could be replayed without us noticing - and the
+        * firmware can filter a lot, though usually only if nothing
+        * has changed.
+        */
+       if (!ieee80211_is_beacon(hdr->frame_control))
+               return 0;
+
+       /* good cases */
+       if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
+                  !(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
+               return 0;
+
+       if (!sta)
+               return -1;
+
+       mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+       /* only key IDs 6 and 7 are valid for beacon protection */
+       if (fwkeyid != 6 && fwkeyid != 7)
+               return -1;
+
+       mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+       key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]);
+       if (!key)
+               return -1;
+
+       if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
+               return -1;
+
+       /*
+        * See if the key ID matches - if not this may be due to a
+        * switch and the firmware may erroneously report !MIC_OK.
+        */
+       keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
+       if (keyid != fwkeyid)
+               return -1;
+
+       /* Report status to mac80211 */
+       if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+               ieee80211_key_mic_failure(key);
+       else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
+               ieee80211_key_replay(key);
+
+       return -1;
+}
+
+static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                            struct ieee80211_hdr *hdr,
                             struct ieee80211_rx_status *stats, u16 phy_info,
                             struct iwl_rx_mpdu_desc *desc,
                             u32 pkt_flags, int queue, u8 *crypt_len)
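
For reference, the tail-of-frame arithmetic in iwl_mvm_rx_mgmt_crypto() above follows the MMIE layout of a protected beacon: key ID (2 bytes, little endian), then the 6-byte packet number, then the MIC of icv_len bytes. A standalone sketch of the same offset computation (mmie_keyid is an invented name; GMAC_PN_LEN mirrors IEEE80211_GMAC_PN_LEN):

#include <stddef.h>
#include <stdint.h>

#define GMAC_PN_LEN 6

static int mmie_keyid(const uint8_t *frame, size_t len, size_t icv_len)
{
        size_t tail = icv_len + GMAC_PN_LEN + 2;

        if (len < tail)
                return -1;      /* too short to carry key ID + PN + MIC */
        /* the key ID field is little endian; beacon protection uses
         * IDs 6 and 7, which fit in the first byte */
        return frame[len - tail];
}
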
@@ -345,6 +410,8 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
                        return -1;
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
+       case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
+               return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
        default:
                /*
                 * Sometimes we can get frames that were not decrypted
@@ -1567,6 +1634,23 @@ static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
        }
 }
 
+struct iwl_rx_sta_csa {
+       bool all_sta_unblocked;
+       struct ieee80211_vif *vif;
+};
+
+static void iwl_mvm_rx_get_sta_block_tx(void *data, struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_rx_sta_csa *rx_sta_csa = data;
+
+       if (mvmsta->vif != rx_sta_csa->vif)
+               return;
+
+       if (mvmsta->disable_tx)
+               rx_sta_csa->all_sta_unblocked = false;
+}
+
 void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                        struct iwl_rx_cmd_buffer *rxb, int queue)
 {
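
The helper just added is a textbook atomic-iterator accumulator: a small context struct starts out optimistic (all_sta_unblocked = true) and any still-blocked station belonging to the interface flips it. A minimal sketch of the pattern outside the driver (types and names invented; the plain loop stands in for ieee80211_iterate_stations_atomic()):

#include <stdbool.h>
#include <stddef.h>

struct sta { const void *vif; bool disable_tx; };

struct csa_check {
        bool all_sta_unblocked;
        const void *vif;
};

static void check_sta(void *data, const struct sta *sta)
{
        struct csa_check *c = data;

        if (sta->vif != c->vif)
                return;         /* station belongs to another interface */
        if (sta->disable_tx)
                c->all_sta_unblocked = false;
}

static bool all_unblocked(const struct sta *stas, size_t n, const void *vif)
{
        struct csa_check c = { .all_sta_unblocked = true, .vif = vif };
        size_t i;

        for (i = 0; i < n; i++)
                check_sta(&c, &stas[i]);
        return c.all_sta_unblocked;
}
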
@@ -1682,15 +1766,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 
        iwl_mvm_decode_lsig(skb, &phy_data);
 
-       rx_status = IEEE80211_SKB_RXCB(skb);
-
-       if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
-                             le32_to_cpu(pkt->len_n_flags), queue,
-                             &crypt_len)) {
-               kfree_skb(skb);
-               return;
-       }
-
        /*
         * Keep packets with CRC errors (and with overrun) for monitor mode
         * (otherwise the firmware discards them) but mark them as bad.
@@ -1774,6 +1849,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
        }
 
+       if (iwl_mvm_rx_crypto(mvm, sta, hdr, rx_status, phy_info, desc,
+                             le32_to_cpu(pkt->len_n_flags), queue,
+                             &crypt_len)) {
+               kfree_skb(skb);
+               goto out;
+       }
+
        if (sta) {
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                struct ieee80211_vif *tx_blocked_vif =
@@ -1798,10 +1880,24 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                if (unlikely(tx_blocked_vif) && tx_blocked_vif == vif) {
                        struct iwl_mvm_vif *mvmvif =
                                iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+                       struct iwl_rx_sta_csa rx_sta_csa = {
+                               .all_sta_unblocked = true,
+                               .vif = tx_blocked_vif,
+                       };
 
                        if (mvmvif->csa_target_freq == rx_status->freq)
                                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
                                                                 false);
+                       ieee80211_iterate_stations_atomic(mvm->hw,
+                                                         iwl_mvm_rx_get_sta_block_tx,
+                                                         &rx_sta_csa);
+
+                       if (rx_sta_csa.all_sta_unblocked) {
+                               RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+                               /* Unblock BCAST / MCAST station */
+                               iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+                               cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+                       }
                }
 
                rs_update_last_rssi(mvm, mvmsta, rx_status);
@@ -1938,6 +2034,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
                .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
        };
 
+       if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
+               return;
+
        if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
                return;
 
@@ -2067,6 +2166,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_frame_release *release = (void *)pkt->data;
 
+       if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+               return;
+
        iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
                                          le16_to_cpu(release->nssn),
                                          queue, 0);
@@ -2087,6 +2189,9 @@ void iwl_mvm_rx_bar_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
                                         IWL_BAR_FRAME_RELEASE_TID_MASK);
        struct iwl_mvm_baid_data *baid_data;
 
+       if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*release)))
+               return;
+
        if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
                         baid >= ARRAY_SIZE(mvm->baid_map)))
                return;
index 97d2de8..42e790e 100644 (file)
@@ -2854,12 +2854,19 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
                                .aborted = true,
                        };
 
+                       cancel_delayed_work(&mvm->scan_timeout_dwork);
+
                        ieee80211_scan_completed(mvm->hw, &info);
                        mvm->scan_uid_status[uid] = 0;
                }
                uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
-               if (uid >= 0 && !mvm->fw_restart) {
-                       ieee80211_sched_scan_stopped(mvm->hw);
+               if (uid >= 0) {
+                       /* Sched scan will be restarted by mac80211 in
+                        * restart_hw, so do not report if FW is about to be
+                        * restarted.
+                        */
+                       if (!mvm->fw_restart)
+                               ieee80211_sched_scan_stopped(mvm->hw);
                        mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
                        mvm->scan_uid_status[uid] = 0;
                }
@@ -2889,6 +2896,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
                                .aborted = true,
                        };
 
+                       cancel_delayed_work(&mvm->scan_timeout_dwork);
                        ieee80211_scan_completed(mvm->hw, &info);
                }
 
index 578c353..3a411bb 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2015, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2015, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -3111,11 +3111,11 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
                if (iwl_mvm_has_new_tx_api(mvm)) {
                        if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
-                                                  BIT(tid), 0))
+                                                  BIT(tid)))
                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_txq_empty(mvm->trans, txq_id);
                } else {
-                       if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+                       if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
                                IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
                        iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
                }
@@ -3310,7 +3310,8 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 
        /* verify the key details match the required command's expectations */
        if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
-                   (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+                   (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
+                    keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
                    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
                     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
@@ -3359,9 +3360,10 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
                                                       ((u64) pn[0] << 40));
        }
 
-       IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+       IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
                       remove_key ? "removing" : "installing",
-                      igtk_cmd.sta_id);
+                      keyconf->keyidx >= 6 ? "B" : "",
+                      keyconf->keyidx, igtk_cmd.sta_id);
 
        if (!iwl_mvm_has_new_rx_api(mvm)) {
                struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
@@ -3815,7 +3817,7 @@ static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
                                   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
@@ -3829,12 +3831,11 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvm_sta;
        int i;
 
-       lockdep_assert_held(&mvm->mutex);
+       rcu_read_lock();
 
        /* Block/unblock all the stations of the given mvmvif */
        for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
-               sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
-                                               lockdep_is_held(&mvm->mutex));
+               sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
                if (IS_ERR_OR_NULL(sta))
                        continue;
 
@@ -3846,6 +3847,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
                iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
        }
 
+       rcu_read_unlock();
+
        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
                return;
 
index 507625f..790bc68 100644 (file)
@@ -44,7 +44,7 @@ static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
        iwl_mvm_set_hw_ctkill_state(mvm, false);
 }
 
-void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+static void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
 {
        /* ignore the notification if we are in test mode */
        if (mvm->temperature_test)
@@ -156,12 +156,6 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct ct_kill_notif *notif;
-       int len = iwl_rx_packet_payload_len(pkt);
-
-       if (WARN_ON_ONCE(len != sizeof(*notif))) {
-               IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
-               return;
-       }
 
        notif = (struct ct_kill_notif *)pkt->data;
        IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
index 3712adc..871e9da 100644 (file)
@@ -263,17 +263,20 @@ static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
 
 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
                               struct ieee80211_tx_info *info,
-                              struct ieee80211_sta *sta)
+                              struct ieee80211_sta *sta, __le16 fc)
 {
        int rate_idx;
        u8 rate_plcp;
        u32 rate_flags = 0;
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
        /* HT rate doesn't make sense for a non data frame */
-       WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
-                 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+       WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
+                 !ieee80211_is_data(fc),
+                 "Got an HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non-data frame\n",
                  info->control.rates[0].flags,
-                 info->control.rates[0].idx);
+                 info->control.rates[0].idx,
+                 le16_to_cpu(fc), mvmsta->sta_state);
 
        rate_idx = info->control.rates[0].idx;
        /* if the rate isn't a well known legacy rate, take the lowest one */
@@ -305,7 +308,7 @@ static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
                                       struct ieee80211_tx_info *info,
                                       struct ieee80211_sta *sta, __le16 fc)
 {
-       return iwl_mvm_get_tx_rate(mvm, info, sta) |
+       return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
                iwl_mvm_get_tx_ant(mvm, info, sta, fc);
 }
 
@@ -1324,12 +1327,24 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
 }
 
 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
-                                           u32 status)
+                                           u32 status, __le16 frame_control)
 {
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_tx_status *status_trig;
        int i;
 
+       if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
+               enum iwl_fw_ini_time_point tp =
+                       IWL_FW_INI_TIME_POINT_TX_FAILED;
+
+               if (ieee80211_is_action(frame_control))
+                       tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
+
+               iwl_dbg_tlv_time_point(&mvm->fwrt, tp, NULL);
+               return;
+       }
+
        trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
                                     FW_DBG_TRIGGER_TX_STATUS);
        if (!trig)
@@ -1447,7 +1462,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                if (skb_freed > 1)
                        info->flags |= IEEE80211_TX_STAT_ACK;
 
-               iwl_mvm_tx_status_check_trigger(mvm, status);
+               iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
 
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
@@ -1631,10 +1646,13 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
        struct agg_tx_status *frame_status =
                iwl_mvm_get_agg_status(mvm, tx_resp);
        int i;
+       bool trigger_timepoint = false;
 
        for (i = 0; i < tx_resp->frame_count; i++) {
                u16 fstatus = le16_to_cpu(frame_status[i].status);
-
+               /* in case one frame wasn't transmitted, trigger the time point */
+               trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
+                                     AGG_TX_STATE_TRANSMITTED);
                IWL_DEBUG_TX_REPLY(mvm,
                                   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
                                   iwl_get_agg_tx_status(fstatus),
@@ -1643,6 +1661,11 @@ static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
                                        AGG_TX_STATE_TRY_CNT_POS,
                                   le16_to_cpu(frame_status[i].sequence));
        }
+
+       if (trigger_timepoint)
+               iwl_dbg_tlv_time_point(&mvm->fwrt,
+                                      IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
 }
 #else
 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
@@ -1704,7 +1727,8 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 
 static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
                               int txq, int index,
-                              struct ieee80211_tx_info *ba_info, u32 rate)
+                              struct ieee80211_tx_info *tx_info, u32 rate,
+                              bool is_flush)
 {
        struct sk_buff_head reclaimed_skbs;
        struct iwl_mvm_tid_data *tid_data = NULL;
@@ -1747,7 +1771,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
                 * frames because before failing a frame the firmware transmits
                 * it without aggregation at least once.
                 */
-               info->flags |= IEEE80211_TX_STAT_ACK;
+               if (!is_flush)
+                       info->flags |= IEEE80211_TX_STAT_ACK;
        }
 
        /*
@@ -1766,7 +1791,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
 
        if (tid_data->txq_id != txq) {
                IWL_ERR(mvm,
-                       "invalid BA notification: Q %d, tid %d\n",
+                       "invalid reclaim request: Q %d, tid %d\n",
                        tid_data->txq_id, tid);
                rcu_read_unlock();
                return;
@@ -1781,26 +1806,28 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
        freed = 0;
 
        /* pack lq color from tid_data along the reduced txp */
-       ba_info->status.status_driver_data[0] =
+       tx_info->status.status_driver_data[0] =
                RS_DRV_DATA_PACK(tid_data->lq_color,
-                                ba_info->status.status_driver_data[0]);
-       ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+                                tx_info->status.status_driver_data[0]);
+       tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
                struct ieee80211_hdr *hdr = (void *)skb->data;
                struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 
-               if (ieee80211_is_data_qos(hdr->frame_control))
-                       freed++;
-               else
-                       WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+               if (!is_flush) {
+                       if (ieee80211_is_data_qos(hdr->frame_control))
+                               freed++;
+                       else
+                               WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+               }
 
                /* this is the first skb we deliver in this batch */
                /* put the rate scaling data there */
                if (freed == 1) {
                        info->flags |= IEEE80211_TX_STAT_AMPDU;
-                       memcpy(&info->status, &ba_info->status,
-                              sizeof(ba_info->status));
+                       memcpy(&info->status, &tx_info->status,
+                              sizeof(tx_info->status));
                        iwl_mvm_hwrate_to_tx_status(rate, info);
                }
        }
@@ -1811,7 +1838,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
         * possible (i.e. first MPDU in the aggregation wasn't acked)
         * Still it's important to update RS about sent vs. acked.
         */
-       if (skb_queue_empty(&reclaimed_skbs)) {
+       if (!is_flush && skb_queue_empty(&reclaimed_skbs)) {
                struct ieee80211_chanctx_conf *chanctx_conf = NULL;
 
                if (mvmsta->vif)
@@ -1821,13 +1848,13 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
                if (WARN_ON_ONCE(!chanctx_conf))
                        goto out;
 
-               ba_info->band = chanctx_conf->def.chan->band;
-               iwl_mvm_hwrate_to_tx_status(rate, ba_info);
+               tx_info->band = chanctx_conf->def.chan->band;
+               iwl_mvm_hwrate_to_tx_status(rate, tx_info);
 
                if (!iwl_mvm_has_tlc_offload(mvm)) {
                        IWL_DEBUG_TX_REPLY(mvm,
                                           "No reclaim. Update rs directly\n");
-                       iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+                       iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
                }
        }
 
@@ -1843,6 +1870,7 @@ out:
 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
        int sta_id, tid, txq, index;
        struct ieee80211_tx_info ba_info = {};
        struct iwl_mvm_ba_notif *ba_notif;
@@ -1855,8 +1883,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                struct iwl_mvm_compressed_ba_notif *ba_res =
                        (void *)pkt->data;
                u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+               u16 tfd_cnt;
                int i;
 
+               if (unlikely(sizeof(*ba_res) > pkt_len))
+                       return;
+
                sta_id = ba_res->sta_id;
                ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
                ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
@@ -1865,8 +1897,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                ba_info.status.status_driver_data[0] =
                        (void *)(uintptr_t)ba_res->reduced_txp;
 
-               if (!le16_to_cpu(ba_res->tfd_cnt))
-                       goto out;
+               tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
+               if (!tfd_cnt || struct_size(ba_res, tfd, tfd_cnt) > pkt_len)
+                       return;
 
                rcu_read_lock();
 
@@ -1881,7 +1914,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                 */
 
                /* Free per TID */
-               for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+               for (i = 0; i < tfd_cnt; i++) {
                        struct iwl_mvm_compressed_ba_tfd *ba_tfd =
                                &ba_res->tfd[i];
 
@@ -1896,14 +1929,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                                           (int)(le16_to_cpu(ba_tfd->q_num)),
                                           le16_to_cpu(ba_tfd->tfd_index),
                                           &ba_info,
-                                          le32_to_cpu(ba_res->tx_rate));
+                                          le32_to_cpu(ba_res->tx_rate), false);
                }
 
                if (mvmsta)
                        iwl_mvm_tx_airtime(mvm, mvmsta,
                                           le32_to_cpu(ba_res->wireless_time));
                rcu_read_unlock();
-out:
+
                IWL_DEBUG_TX_REPLY(mvm,
                                   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
                                   sta_id, le32_to_cpu(ba_res->flags),
@@ -1939,7 +1972,7 @@ out:
        rcu_read_unlock();
 
        iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
-                          tid_data->rate_n_flags);
+                          tid_data->rate_n_flags, false);
 
        IWL_DEBUG_TX_REPLY(mvm,
                           "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
@@ -1963,7 +1996,7 @@ out:
  * 2) flush the Tx path
  * 3) wait for the transport queues to be empty
  */
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
 {
        int ret;
        struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
@@ -1972,29 +2005,89 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
        };
 
        WARN_ON(iwl_mvm_has_new_tx_api(mvm));
-
-       ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+       ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
                                   sizeof(flush_cmd), &flush_cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
        return ret;
 }
 
-int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
-                          u16 tids, u32 flags)
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
 {
        int ret;
+       struct iwl_tx_path_flush_cmd_rsp *rsp;
        struct iwl_tx_path_flush_cmd flush_cmd = {
                .sta_id = cpu_to_le32(sta_id),
                .tid_mask = cpu_to_le16(tids),
        };
 
+       struct iwl_host_cmd cmd = {
+               .id = TXPATH_FLUSH,
+               .len = { sizeof(flush_cmd), },
+               .data = { &flush_cmd, },
+       };
+
        WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
-                                  sizeof(flush_cmd), &flush_cmd);
-       if (ret)
+       if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
+               cmd.flags |= CMD_WANT_SKB;
+
+       IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
+                           sta_id, tids);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+       if (ret) {
                IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+               return ret;
+       }
+
+       if (cmd.flags & CMD_WANT_SKB) {
+               int i;
+               int num_flushed_queues;
+
+               if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
+                       ret = -EIO;
+                       goto free_rsp;
+               }
+
+               rsp = (void *)cmd.resp_pkt->data;
+
+               if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
+                             "sta_id %d != rsp_sta_id %d",
+                             sta_id, le16_to_cpu(rsp->sta_id))) {
+                       ret = -EIO;
+                       goto free_rsp;
+               }
+
+               num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
+               if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
+                             "num_flushed_queues %d", num_flushed_queues)) {
+                       ret = -EIO;
+                       goto free_rsp;
+               }
+
+               for (i = 0; i < num_flushed_queues; i++) {
+                       struct ieee80211_tx_info tx_info = {};
+                       struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
+                       int tid = le16_to_cpu(queue_info->tid);
+                       int read_before = le16_to_cpu(queue_info->read_before_flush);
+                       int read_after = le16_to_cpu(queue_info->read_after_flush);
+                       int queue_num = le16_to_cpu(queue_info->queue_num);
+
+                       if (tid == IWL_MGMT_TID)
+                               tid = IWL_MAX_TID_COUNT;
+
+                       IWL_DEBUG_TX_QUEUES(mvm,
+                                           "tid %d queue_id %d read-before %d read-after %d\n",
+                                           tid, queue_num, read_before, read_after);
+
+                       iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
+                                          &tx_info, 0, true);
+               }
+free_rsp:
+               iwl_free_resp(&cmd);
+       }
        return ret;
 }
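
Everything read out of the flush response above is validated before use: the overall size, the echoed sta_id, and the queue count against its bound. A compact sketch of the same checks (the struct layout and the bound are illustrative, not the firmware ABI):

#include <stddef.h>
#include <stdint.h>

#define TX_FLUSH_QUEUE_RSP 16   /* illustrative bound */

struct flush_queue_info {
        uint16_t queue_num, tid, read_before_flush, read_after_flush;
};

struct flush_rsp {
        uint16_t sta_id, num_flushed_queues;
        struct flush_queue_info queues[TX_FLUSH_QUEUE_RSP];
};

static int flush_rsp_valid(const void *buf, size_t len, uint16_t sta_id)
{
        const struct flush_rsp *rsp = buf;

        if (len != sizeof(*rsp))
                return 0;       /* unexpected size: don't touch any field */
        if (rsp->sta_id != sta_id)
                return 0;       /* response doesn't match the request */
        if (rsp->num_flushed_queues > TX_FLUSH_QUEUE_RSP)
                return 0;       /* count would index past the array */
        return 1;
}
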
 
@@ -2007,10 +2100,10 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
                     offsetof(struct iwl_mvm_sta, sta_id));
 
        if (iwl_mvm_has_new_tx_api(mvm))
-               return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
+               return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff);
 
        if (internal)
-               return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
+               return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk);
 
-       return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
+       return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk);
 }
index ee2e0cb..b6b481f 100644 (file)
@@ -45,8 +45,11 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
        if (cmd->flags & CMD_WANT_SKB)
                return ret;
 
-       /* Silently ignore failures if RFKILL is asserted */
-       if (!ret || ret == -ERFKILL)
+       /*
+        * Silently ignore failures if RFKILL is asserted or
+        * we are in the suspend/resume process
+        */
+       if (!ret || ret == -ERFKILL || ret == -EHOSTDOWN)
                return 0;
        return ret;
 }
@@ -496,18 +499,33 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
 static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
 {
        struct iwl_trans *trans = mvm->trans;
-       u32 error;
+       u32 error, data1;
+
+       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+               error = UMAG_SB_CPU_2_STATUS;
+               data1 = UMAG_SB_CPU_1_STATUS;
+       } else if (mvm->trans->trans_cfg->device_family >=
+                  IWL_DEVICE_FAMILY_8000) {
+               error = SB_CPU_2_STATUS;
+               data1 = SB_CPU_1_STATUS;
+       } else {
+               return;
+       }
 
-       error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
+       error = iwl_read_umac_prph(trans, error);
 
        IWL_ERR(trans, "IML/ROM dump:\n");
 
        if (error & 0xFFFF0000)
-               IWL_ERR(trans, "IML/ROM SYSASSERT:\n");
+               IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
 
        IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
        IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
-               iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS));
+               iwl_read_umac_prph(trans, data1));
+
+       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+               IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
+                       iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
 }
 
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
@@ -525,8 +543,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
 
        iwl_mvm_dump_umac_error_log(mvm);
 
-       if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_mvm_dump_iml_error_log(mvm);
+       iwl_mvm_dump_iml_error_log(mvm);
 
        iwl_fw_error_print_fseq_regs(&mvm->fwrt);
 }
@@ -832,6 +849,36 @@ struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
        return bss_iter_data.vif;
 }
 
+struct iwl_bss_find_iter_data {
+       struct ieee80211_vif *vif;
+       u32 macid;
+};
+
+static void iwl_mvm_bss_find_iface_iterator(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       struct iwl_bss_find_iter_data *data = _data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       if (mvmvif->id == data->macid)
+               data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_vif_by_macid(struct iwl_mvm *mvm, u32 macid)
+{
+       struct iwl_bss_find_iter_data data = {
+               .macid = macid,
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ieee80211_iterate_active_interfaces_atomic(
+               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+               iwl_mvm_bss_find_iface_iterator, &data);
+
+       return data.vif;
+}
+
 struct iwl_sta_iter_data {
        bool assoc;
 };
index ed3f5b7..c45542f 100644 (file)
@@ -478,40 +478,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
        {IWL_PCI_DEVICE(0x2723, PCI_ANY_ID, iwl_ax200_trans_cfg)},
 
-       {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0xE020, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0xE024, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x4020, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x6020, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x6024, iwlax210_2ax_cfg_ty_gf_a0)},
-       {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0078, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x2726, 0x007C, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0)},
-       {IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
-       {IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
-       {IWL_PCI_DEVICE(0x2726, 0x2074, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x2726, 0x4070, iwlax201_cfg_snj_hr_b0)},
-       {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
-       {IWL_PCI_DEVICE(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long)},
-       {IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
-       {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
-       {IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
-       {IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
-       {IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+/* So devices */
+       {IWL_PCI_DEVICE(0x2725, PCI_ANY_ID, iwl_so_trans_cfg)},
+       {IWL_PCI_DEVICE(0x2726, PCI_ANY_ID, iwl_snj_trans_cfg)},
+       {IWL_PCI_DEVICE(0x7A70, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+       {IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
+       {IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
+       {IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
 
 /* Ma devices */
        {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
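
The collapse to PCI_ANY_ID works because matching is now two-stage: the pci_device_id table claims whole device IDs, and the driver-private iwl_dev_info_table below refines the configuration by subdevice, with wildcard entries as fallback. A sketch of that first-match-wins lookup with invented entries:

#include <stddef.h>
#include <stdint.h>

#define ANY_ID 0xffff   /* stand-in for PCI_ANY_ID / IWL_CFG_ANY */

struct dev_info { uint16_t device, subdevice; const char *cfg; };

static const struct dev_info dev_infos[] = {
        { 0x2726, 0x1651, "snj-hr-b0, killer 1650s" },
        { 0x2726, ANY_ID, "snj default" },
};

static const char *lookup_cfg(uint16_t device, uint16_t subdevice)
{
        size_t i;

        for (i = 0; i < sizeof(dev_infos) / sizeof(dev_infos[0]); i++) {
                const struct dev_info *di = &dev_infos[i];

                if (di->device != ANY_ID && di->device != device)
                        continue;
                if (di->subdevice != ANY_ID && di->subdevice != subdevice)
                        continue;
                return di->cfg; /* specific entries listed before wildcards */
        }
        return NULL;    /* no match: probe fails */
}
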
@@ -555,15 +528,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
        IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
 
-       /* QnJ with Hr */
-       IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
-
-       /* SnJ with HR*/
-       IWL_DEV_INFO(0x2726, 0x0244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
-       IWL_DEV_INFO(0x2726, 0x1651, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
-       IWL_DEV_INFO(0x2726, 0x1652, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
-       IWL_DEV_INFO(0x2726, 0x4244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
-
        /* Qu with Hr */
        IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
@@ -629,6 +593,34 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
 
+       /* So with GF */
+       IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
+       IWL_DEV_INFO(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+       IWL_DEV_INFO(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+       IWL_DEV_INFO(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+       IWL_DEV_INFO(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0, NULL),
+       IWL_DEV_INFO(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+       IWL_DEV_INFO(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+       IWL_DEV_INFO(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long, NULL),
+       IWL_DEV_INFO(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+       IWL_DEV_INFO(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+       IWL_DEV_INFO(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
+       IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
+
+       /* SnJ with HR */
+       IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
+       IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL),
+       IWL_DEV_INFO(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0, NULL),
+       IWL_DEV_INFO(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
+       IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL),
+       IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
+       IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
+
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
@@ -935,6 +927,59 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_quz_a0_hr1_b0, iwl_ax101_name),
 
+/* QnJ with Hr */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_QNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+
+/* SnJ with Jf */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9461_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9461_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9462_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9462_name),
+
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9560_160_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+                     IWL_CFG_NO_160, IWL_CFG_CORES_BT,
+                     iwl_cfg_snj_a0_jf_b0, iwl9560_name),
+
+/* SnJ with Hr */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_cfg_snj_hr_b0, iwl_ax101_name),
+
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_cfg_snj_hr_b0, iwl_ax201_name),
+
 /* Ma */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
                      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
@@ -1015,6 +1060,16 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        }
 
+       /*
+        * Workaround for problematic SnJ devices: sometimes, when
+        * certain RF modules are connected to SnJ, the device ID
+        * changes to QnJ's.  So we use QnJ's trans_cfg up to this
+        * point.  But if we detect that the MAC type is actually
+        * SnJ, we switch trans_cfg here to avoid problems later.
+        */
+       if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_SNJ)
+               iwl_trans->trans_cfg = &iwl_so_trans_cfg;
+
 #if IS_ENABLED(CONFIG_IWLMVM)
        /*
         * special-case 7265D, it has the same PCI IDs.
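
In the device table above, IWL_DEV_INFO() entries pin an exact PCI
(device, subdevice) pair, while the _IWL_DEV_INFO() entries match on MAC
type, RF type, RF ID, bandwidth and cores fields, with IWL_CFG_ANY acting
as a wildcard. A minimal sketch of the lookup idea follows; the helper
name is hypothetical and the authoritative walk lives in drv.c's probe
path:

	/* sketch only: first match wins; IWL_CFG_ANY fields match anything */
	static const struct iwl_dev_info *
	sketch_find_dev_info(struct pci_dev *pdev)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
			const struct iwl_dev_info *di = &iwl_dev_info_table[i];

			if (di->device != (u16)IWL_CFG_ANY &&
			    di->device != pdev->device)
				continue;
			if (di->subdevice != (u16)IWL_CFG_ANY &&
			    di->subdevice != pdev->subsystem_device)
				continue;
			/* the _IWL_DEV_INFO() variants add the same test
			 * for the mac_type, rf_type, rf_id, 160 MHz and
			 * cores fields
			 */
			return di;
		}
		return NULL;
	}
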
index a528d3d..d9688c7 100644 (file)
@@ -363,7 +363,6 @@ struct iwl_trans_pcie {
        bool ucode_write_complete;
        bool sx_complete;
        wait_queue_head_t ucode_write_waitq;
-       wait_queue_head_t wait_command_queue;
        wait_queue_head_t sx_waitq;
 
        u8 def_rx_queue;
@@ -418,8 +417,7 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
        return (void *)trans->trans_specific;
 }
 
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
-                                     struct msix_entry *entry)
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
 {
        /*
         * Before sending the interrupt the HW disables it to prevent
@@ -429,7 +427,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
         * write 1 clear (W1C) register, meaning that it is cleared
         * by writing 1 to the bit.
         */
-       iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+       iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
 }
 
 static inline struct iwl_trans *
@@ -462,7 +460,6 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);
 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                            struct iwl_rxq *rxq);
 
@@ -569,9 +566,9 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
@@ -601,9 +598,9 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        _iwl_enable_interrupts(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 }
 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
 {
@@ -762,7 +759,6 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
 
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
 void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
@@ -800,4 +796,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
 void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
 void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
                                  bool test, bool reset);
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+                              struct iwl_host_cmd *cmd);
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+                         struct iwl_host_cmd *cmd);
 #endif /* __iwl_trans_int_pcie_h__ */
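
The spin_lock() to spin_lock_bh() conversions in this header, and
throughout rx.c below, follow from moving RX processing into NAPI, i.e.
softirq context: a lock that is now also taken from the poll path must
disable bottom halves when taken from process context, otherwise the poll
softirq could interrupt the lock holder on the same CPU and deadlock. A
minimal sketch of the resulting discipline, using names from the
surrounding code:

	/* process context (e.g. during rx init): block softirqs first */
	spin_lock_bh(&rxq->lock);
	/* ... touch rxq state shared with the NAPI poll functions ... */
	spin_unlock_bh(&rxq->lock);

	/* NAPI poll, already running in softirq: plain spin_lock() suffices */
	spin_lock(&trans_pcie->irq_lock);
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
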
index 37bbd9a..407809c 100644 (file)
@@ -207,10 +207,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 
                if (!rxq->need_update)
                        continue;
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                rxq->need_update = false;
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
        }
 }
 
@@ -255,7 +255,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;
 
-       spin_lock(&rxq->lock);
+       spin_lock_bh(&rxq->lock);
        while (rxq->free_count) {
                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
@@ -269,16 +269,16 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
                rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
                rxq->free_count--;
        }
-       spin_unlock(&rxq->lock);
+       spin_unlock_bh(&rxq->lock);
 
        /*
         * If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8.
         */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
        }
 }
 
@@ -301,7 +301,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                return;
 
-       spin_lock(&rxq->lock);
+       spin_lock_bh(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
@@ -320,14 +320,14 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
-       spin_unlock(&rxq->lock);
+       spin_unlock_bh(&rxq->lock);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
        }
 }
 
@@ -433,28 +433,28 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
        while (1) {
                unsigned int offset;
 
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
                if (list_empty(&rxq->rx_used)) {
-                       spin_unlock(&rxq->lock);
+                       spin_unlock_bh(&rxq->lock);
                        return;
                }
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
 
                page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
                if (!page)
                        return;
 
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
 
                if (list_empty(&rxq->rx_used)) {
-                       spin_unlock(&rxq->lock);
+                       spin_unlock_bh(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
 
                BUG_ON(rxb->page);
                rxb->page = page;
@@ -466,19 +466,19 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
-                       spin_lock(&rxq->lock);
+                       spin_lock_bh(&rxq->lock);
                        list_add(&rxb->list, &rxq->rx_used);
-                       spin_unlock(&rxq->lock);
+                       spin_unlock_bh(&rxq->lock);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
 
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
 
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
 
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
        }
 }
 
@@ -514,10 +514,10 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
        IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
 
        /* If we were scheduled - there is at least one request */
-       spin_lock(&rba->lock);
+       spin_lock_bh(&rba->lock);
        /* swap out the rba->rbd_empty to a local list */
        list_replace_init(&rba->rbd_empty, &local_empty);
-       spin_unlock(&rba->lock);
+       spin_unlock_bh(&rba->lock);
 
        while (pending) {
                int i;
@@ -577,21 +577,21 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                                              pending);
                }
 
-               spin_lock(&rba->lock);
+               spin_lock_bh(&rba->lock);
                /* add the allocated rbds to the allocator allocated list */
                list_splice_tail(&local_allocated, &rba->rbd_allocated);
                /* get more empty RBDs for current pending requests */
                list_splice_tail_init(&rba->rbd_empty, &local_empty);
-               spin_unlock(&rba->lock);
+               spin_unlock_bh(&rba->lock);
 
                atomic_inc(&rba->req_ready);
 
        }
 
-       spin_lock(&rba->lock);
+       spin_lock_bh(&rba->lock);
        /* return unused rbds to the allocator empty list */
        list_splice_tail(&local_empty, &rba->rbd_empty);
-       spin_unlock(&rba->lock);
+       spin_unlock_bh(&rba->lock);
 
        IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
 }
@@ -1008,10 +1008,76 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        rxq->used_count = 0;
 }
 
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+
+static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
 {
-       WARN_ON(1);
-       return 0;
+       struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+       struct iwl_trans_pcie *trans_pcie;
+       struct iwl_trans *trans;
+       int ret;
+
+       trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+       trans = trans_pcie->trans;
+
+       ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+       if (ret < budget) {
+               spin_lock(&trans_pcie->irq_lock);
+               if (test_bit(STATUS_INT_ENABLED, &trans->status))
+                       _iwl_enable_interrupts(trans);
+               spin_unlock(&trans_pcie->irq_lock);
+
+               napi_complete_done(&rxq->napi, ret);
+       }
+
+       return ret;
+}
+
+static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
+{
+       struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+       struct iwl_trans_pcie *trans_pcie;
+       struct iwl_trans *trans;
+       int ret;
+
+       trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+       trans = trans_pcie->trans;
+
+       ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+       if (ret < budget) {
+               spin_lock(&trans_pcie->irq_lock);
+               iwl_pcie_clear_irq(trans, rxq->id);
+               spin_unlock(&trans_pcie->irq_lock);
+
+               napi_complete_done(&rxq->napi, ret);
+       }
+
+       return ret;
+}
+
+static int iwl_pcie_napi_poll_msix_shared(struct napi_struct *napi, int budget)
+{
+       struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
+       struct iwl_trans_pcie *trans_pcie;
+       struct iwl_trans *trans;
+       int ret;
+
+       trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+       trans = trans_pcie->trans;
+
+       ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
+
+       if (ret < budget) {
+               spin_lock(&trans_pcie->irq_lock);
+               iwl_pcie_clear_irq(trans, 0);
+               spin_unlock(&trans_pcie->irq_lock);
+
+               napi_complete_done(&rxq->napi, ret);
+       }
+
+       return ret;
 }
 
 static int _iwl_pcie_rx_init(struct iwl_trans *trans)
@@ -1030,12 +1096,12 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 
        cancel_work_sync(&rba->rx_alloc);
 
-       spin_lock(&rba->lock);
+       spin_lock_bh(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
        INIT_LIST_HEAD(&rba->rbd_allocated);
        INIT_LIST_HEAD(&rba->rbd_empty);
-       spin_unlock(&rba->lock);
+       spin_unlock_bh(&rba->lock);
 
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_free_rbs_pool(trans);
@@ -1046,7 +1112,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-               spin_lock(&rxq->lock);
+               spin_lock_bh(&rxq->lock);
                /*
                 * Set read write pointer to reflect that we have processed
                 * and used all buffers, but have not restocked the Rx queue
@@ -1062,11 +1128,27 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 
                iwl_pcie_rx_init_rxb_lists(rxq);
 
-               if (!rxq->napi.poll)
+               if (!rxq->napi.poll) {
+                       int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
+
+                       if (trans_pcie->msix_enabled) {
+                               poll = iwl_pcie_napi_poll_msix;
+
+                               if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX &&
+                                   i == 0)
+                                       poll = iwl_pcie_napi_poll_msix_shared;
+
+                               if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
+                                   i == 1)
+                                       poll = iwl_pcie_napi_poll_msix_shared;
+                       }
+
                        netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
-                                      iwl_pcie_dummy_napi_poll, 64);
+                                      poll, NAPI_POLL_WEIGHT);
+                       napi_enable(&rxq->napi);
+               }
 
-               spin_unlock(&rxq->lock);
+               spin_unlock_bh(&rxq->lock);
        }
 
        /* move the pool to the default queue and allocator ownerships */
@@ -1108,9 +1190,9 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 
        iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
 
-       spin_lock(&trans_pcie->rxq->lock);
+       spin_lock_bh(&trans_pcie->rxq->lock);
        iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
-       spin_unlock(&trans_pcie->rxq->lock);
+       spin_unlock_bh(&trans_pcie->rxq->lock);
 
        return 0;
 }
@@ -1163,8 +1245,10 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 
                iwl_pcie_free_rxq_dma(trans, rxq);
 
-               if (rxq->napi.poll)
+               if (rxq->napi.poll) {
+                       napi_disable(&rxq->napi);
                        netif_napi_del(&rxq->napi);
+               }
        }
        kfree(trans_pcie->rx_pool);
        kfree(trans_pcie->global_table);
@@ -1417,16 +1501,15 @@ out_err:
 /*
  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
-static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct napi_struct *napi;
        struct iwl_rxq *rxq;
-       u32 r, i, count = 0;
+       u32 r, i, count = 0, handled = 0;
        bool emergency = false;
 
        if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
-               return;
+               return budget;
 
        rxq = &trans_pcie->rxq[queue];
 
@@ -1444,7 +1527,7 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
 
-       while (i != r) {
+       while (i != r && ++handled < budget) {
                struct iwl_rb_allocator *rba = &trans_pcie->rba;
                struct iwl_rx_mem_buffer *rxb;
                /* number of RBDs still waiting for page allocation */
@@ -1545,18 +1628,9 @@ out:
        if (unlikely(emergency && count))
                iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-       napi = &rxq->napi;
-       if (napi->poll) {
-               napi_gro_flush(napi, false);
-
-               if (napi->rx_count) {
-                       netif_receive_skb_list(&napi->rx_list);
-                       INIT_LIST_HEAD(&napi->rx_list);
-                       napi->rx_count = 0;
-               }
-       }
-
        iwl_pcie_rxq_restock(trans, rxq);
+
+       return handled;
 }
 
 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
@@ -1576,6 +1650,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
        struct msix_entry *entry = dev_id;
        struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
        struct iwl_trans *trans = trans_pcie->trans;
+       struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
 
        trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
 
@@ -1585,11 +1660,12 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
        local_bh_disable();
-       iwl_pcie_rx_handle(trans, entry->entry);
+       if (napi_schedule_prep(&rxq->napi))
+               __napi_schedule(&rxq->napi);
+       else
+               iwl_pcie_clear_irq(trans, entry->entry);
        local_bh_enable();
 
-       iwl_pcie_clear_irq(trans, entry);
-
        lock_map_release(&trans->sync_cmd_lockdep_map);
 
        return IRQ_HANDLED;
@@ -1600,7 +1676,6 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
  */
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1612,7 +1687,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
                            APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                iwl_op_mode_wimax_active(trans->op_mode);
-               wake_up(&trans_pcie->wait_command_queue);
+               wake_up(&trans->wait_command_queue);
                return;
        }
 
@@ -1627,7 +1702,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        iwl_trans_fw_error(trans);
 
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-       wake_up(&trans_pcie->wait_command_queue);
+       wake_up(&trans->wait_command_queue);
 }
 
 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
@@ -1742,7 +1817,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
                                       &trans->status))
                        IWL_DEBUG_RF_KILL(trans,
                                          "Rfkill while SYNC HCMD in flight\n");
-               wake_up(&trans_pcie->wait_command_queue);
+               wake_up(&trans->wait_command_queue);
        } else {
                clear_bit(STATUS_RFKILL_HW, &trans->status);
                if (trans_pcie->opmode_down)
@@ -1757,10 +1832,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
+       bool polling = false;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
 
        /* dram interrupt table not set yet,
         * use legacy interrupt.
@@ -1797,7 +1873,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                 */
                if (test_bit(STATUS_INT_ENABLED, &trans->status))
                        _iwl_enable_interrupts(trans);
-               spin_unlock(&trans_pcie->irq_lock);
+               spin_unlock_bh(&trans_pcie->irq_lock);
                lock_map_release(&trans->sync_cmd_lockdep_map);
                return IRQ_NONE;
        }
@@ -1808,7 +1884,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                 * already raised an interrupt.
                 */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
-               spin_unlock(&trans_pcie->irq_lock);
+               spin_unlock_bh(&trans_pcie->irq_lock);
                goto out;
        }
 
@@ -1829,7 +1905,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, iwl_read32(trans, CSR_INT_MASK));
 
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 
        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1949,7 +2025,10 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                isr_stats->rx++;
 
                local_bh_disable();
-               iwl_pcie_rx_handle(trans, 0);
+               if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+                       polling = true;
+                       __napi_schedule(&trans_pcie->rxq[0].napi);
+               }
                local_bh_enable();
        }
 
@@ -1974,20 +2053,22 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
                         inta & ~trans_pcie->inta_mask);
        }
 
-       spin_lock(&trans_pcie->irq_lock);
-       /* only Re-enable all interrupt if disabled by irq */
-       if (test_bit(STATUS_INT_ENABLED, &trans->status))
-               _iwl_enable_interrupts(trans);
-       /* we are loading the firmware, enable FH_TX interrupt only */
-       else if (handled & CSR_INT_BIT_FH_TX)
-               iwl_enable_fw_load_int(trans);
-       /* Re-enable RF_KILL if it occurred */
-       else if (handled & CSR_INT_BIT_RF_KILL)
-               iwl_enable_rfkill_int(trans);
-       /* Re-enable the ALIVE / Rx interrupt if it occurred */
-       else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
-               iwl_enable_fw_load_int_ctx_info(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       if (!polling) {
+               spin_lock_bh(&trans_pcie->irq_lock);
+               /* only re-enable all interrupts if disabled by the irq handler */
+               if (test_bit(STATUS_INT_ENABLED, &trans->status))
+                       _iwl_enable_interrupts(trans);
+               /* we are loading the firmware, enable FH_TX interrupt only */
+               else if (handled & CSR_INT_BIT_FH_TX)
+                       iwl_enable_fw_load_int(trans);
+               /* Re-enable RF_KILL if it occurred */
+               else if (handled & CSR_INT_BIT_RF_KILL)
+                       iwl_enable_rfkill_int(trans);
+               /* Re-enable the ALIVE / Rx interrupt if it occurred */
+               else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
+                       iwl_enable_fw_load_int_ctx_info(trans);
+               spin_unlock_bh(&trans_pcie->irq_lock);
+       }
 
 out:
        lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -2049,7 +2130,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
        if (!trans_pcie->ict_tbl)
                return;
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        _iwl_disable_interrupts(trans);
 
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -2067,7 +2148,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        _iwl_enable_interrupts(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 /* Device is going down; disable ICT interrupt usage */
@@ -2075,9 +2156,9 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        trans_pcie->use_ict = false;
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 irqreturn_t iwl_pcie_isr(int irq, void *data)
@@ -2109,10 +2190,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        struct iwl_trans *trans = trans_pcie->trans;
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta_fh, inta_hw;
+       bool polling = false;
 
        lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
        inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
        /*
@@ -2120,7 +2202,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
         */
        iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
        iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 
        trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
 
@@ -2146,14 +2228,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
            inta_fh & MSIX_FH_INT_CAUSES_Q0) {
                local_bh_disable();
-               iwl_pcie_rx_handle(trans, 0);
+               if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
+                       polling = true;
+                       __napi_schedule(&trans_pcie->rxq[0].napi);
+               }
                local_bh_enable();
        }
 
        if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
            inta_fh & MSIX_FH_INT_CAUSES_Q1) {
                local_bh_disable();
-               iwl_pcie_rx_handle(trans, 1);
+               if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
+                       polling = true;
+                       __napi_schedule(&trans_pcie->rxq[1].napi);
+               }
                local_bh_enable();
        }
 
@@ -2248,7 +2336,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
                wake_up(&trans_pcie->fw_reset_waitq);
        }
 
-       iwl_pcie_clear_irq(trans, entry);
+       if (!polling)
+               iwl_pcie_clear_irq(trans, entry->entry);
 
        lock_map_release(&trans->sync_cmd_lockdep_map);
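
The three poll variants added above share one shape, dictated by the NAPI
contract: do at most budget units of work, and only when finishing under
budget call napi_complete_done() and re-arm the interrupt source (legacy
INTA enable, a per-queue MSI-X W1C ack, or a vector-0 ack for the shared
vector). A condensed skeleton of that shape; rx_work() and rearm_irq()
are hypothetical stand-ins for iwl_pcie_rx_handle() and the
variant-specific re-arm step:

	static int sketch_poll(struct napi_struct *napi, int budget)
	{
		int done = rx_work(napi, budget);	/* hypothetical worker */

		if (done < budget) {
			/* under budget: leave polling mode and re-arm IRQs */
			napi_complete_done(napi, done);
			rearm_irq(napi);		/* hypothetical re-arm */
		}

		/* done == budget keeps the queue on the poll list */
		return done;
	}
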
 
index c602b81..7051555 100644 (file)
@@ -213,9 +213,9 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
                               trans->cfg->min_txq_size);
 
        /* TODO: most of the logic can be removed in A0 - but not in Z0 */
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        iwl_pcie_gen2_apm_init(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 
        iwl_op_mode_nic_config(trans->op_mode);
 
index ab93a84..16f5757 100644 (file)
@@ -511,9 +511,9 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
        int ret;
 
        /* nic_init */
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
        ret = iwl_pcie_apm_init(trans);
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 
        if (ret)
                return ret;
@@ -3286,16 +3286,29 @@ static struct iwl_trans_dump_data
        return dump_data;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
 {
-       return 0;
+       if (enable)
+               iwl_enable_interrupts(trans);
+       else
+               iwl_disable_interrupts(trans);
 }
 
-static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
 {
+       u32 inta_addr, sw_err_bit;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans_pcie->msix_enabled) {
+               inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
+               sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+       } else {
+               inta_addr = CSR_INT;
+               sw_err_bit = CSR_INT_BIT_SW_ERR;
+       }
+
+       iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
 }
-#endif /* CONFIG_PM_SLEEP */
 
 #define IWL_TRANS_COMMON_OPS                                           \
        .op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
@@ -3316,25 +3329,17 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
        .dump_data = iwl_trans_pcie_dump_data,                          \
        .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
        .d3_resume = iwl_trans_pcie_d3_resume,                          \
-       .sync_nmi = iwl_trans_pcie_sync_nmi
-
-#ifdef CONFIG_PM_SLEEP
-#define IWL_TRANS_PM_OPS                                               \
-       .suspend = iwl_trans_pcie_suspend,                              \
-       .resume = iwl_trans_pcie_resume,
-#else
-#define IWL_TRANS_PM_OPS
-#endif /* CONFIG_PM_SLEEP */
+       .interrupts = iwl_trans_pci_interrupts,                         \
+       .sync_nmi = iwl_trans_pcie_sync_nmi                             \
 
 static const struct iwl_trans_ops trans_ops_pcie = {
        IWL_TRANS_COMMON_OPS,
-       IWL_TRANS_PM_OPS
        .start_hw = iwl_trans_pcie_start_hw,
        .fw_alive = iwl_trans_pcie_fw_alive,
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,
 
-       .send_cmd = iwl_trans_pcie_send_hcmd,
+       .send_cmd = iwl_pcie_enqueue_hcmd,
 
        .tx = iwl_trans_pcie_tx,
        .reclaim = iwl_txq_reclaim,
@@ -3355,13 +3360,12 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 
 static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
        IWL_TRANS_COMMON_OPS,
-       IWL_TRANS_PM_OPS
        .start_hw = iwl_trans_pcie_start_hw,
        .fw_alive = iwl_trans_pcie_gen2_fw_alive,
        .start_fw = iwl_trans_pcie_gen2_start_fw,
        .stop_device = iwl_trans_pcie_gen2_stop_device,
 
-       .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+       .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
 
        .tx = iwl_txq_gen2_tx,
        .reclaim = iwl_txq_reclaim,
@@ -3496,9 +3500,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
                 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
 
-       /* Initialize the wait queue for commands */
-       init_waitqueue_head(&trans_pcie->wait_command_queue);
-
        init_waitqueue_head(&trans_pcie->sx_waitq);
 
 
@@ -3538,48 +3539,3 @@ out_free_trans:
        iwl_trans_free(trans);
        return ERR_PTR(ret);
 }
-
-void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
-       bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
-       u32 inta_addr, sw_err_bit;
-
-       if (trans_pcie->msix_enabled) {
-               inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
-               sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
-       } else {
-               inta_addr = CSR_INT;
-               sw_err_bit = CSR_INT_BIT_SW_ERR;
-       }
-
-       /* if the interrupts were already disabled, there is no point in
-        * calling iwl_disable_interrupts
-        */
-       if (interrupts_enabled)
-               iwl_disable_interrupts(trans);
-
-       iwl_force_nmi(trans);
-       while (time_after(timeout, jiffies)) {
-               u32 inta_hw = iwl_read32(trans, inta_addr);
-
-               /* Error detected by uCode */
-               if (inta_hw & sw_err_bit) {
-                       /* Clear causes register */
-                       iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
-                       break;
-               }
-
-               mdelay(1);
-       }
-
-       /* enable interrupts only if there were already enabled before this
-        * function to avoid a case were the driver enable interrupts before
-        * proper configurations were made
-        */
-       if (interrupts_enabled)
-               iwl_enable_interrupts(trans);
-
-       iwl_trans_fw_error(trans);
-}
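
The NMI-sync polling loop removed above survives in generic form: judging
by the hook added earlier in this file, it becomes
iwl_trans_sync_nmi_with_addr() in iwl-trans.c, taking the interrupt-cause
register address and SW-error bit as parameters and gating interrupts
through the new .interrupts op. A sketch reconstructed from the deleted
code, assuming iwl_trans_interrupts() is the inline wrapper around that
op:

	void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
					  u32 sw_err_bit)
	{
		unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
		bool enabled = test_bit(STATUS_INT_ENABLED, &trans->status);

		/* no point in toggling interrupts that were already off */
		if (enabled)
			iwl_trans_interrupts(trans, false);

		iwl_force_nmi(trans);
		while (time_after(timeout, jiffies)) {
			u32 inta_hw = iwl_read32(trans, inta_addr);

			/* error detected by uCode: ack the cause and stop */
			if (inta_hw & sw_err_bit) {
				iwl_write32(trans, inta_addr,
					    inta_hw & sw_err_bit);
				break;
			}
			mdelay(1);
		}

		/* re-enable only what this function itself disabled */
		if (enabled)
			iwl_trans_interrupts(trans, true);

		iwl_trans_fw_error(trans);
	}
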
index 8757246..1099df7 100644 (file)
@@ -24,8 +24,8 @@
  * failed. On success, it returns the index (>= 0) of command in the
  * command queue.
  */
-static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
-                                     struct iwl_host_cmd *cmd)
+int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+                              struct iwl_host_cmd *cmd)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
@@ -257,124 +257,3 @@ free_dup_buf:
                kfree(dup_buf);
        return idx;
 }
-
-#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
-
-static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
-                                       struct iwl_host_cmd *cmd)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
-       struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
-       int cmd_idx;
-       int ret;
-
-       IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
-       if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
-                                 &trans->status),
-                "Command %s: a command is already active!\n", cmd_str))
-               return -EIO;
-
-       IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
-       cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
-       if (cmd_idx < 0) {
-               ret = cmd_idx;
-               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-               IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
-                       cmd_str, ret);
-               return ret;
-       }
-
-       ret = wait_event_timeout(trans_pcie->wait_command_queue,
-                                !test_bit(STATUS_SYNC_HCMD_ACTIVE,
-                                          &trans->status),
-                                HOST_COMPLETE_TIMEOUT);
-       if (!ret) {
-               IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
-                       cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
-               IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
-                       txq->read_ptr, txq->write_ptr);
-
-               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-               IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-                              cmd_str);
-               ret = -ETIMEDOUT;
-
-               iwl_trans_pcie_sync_nmi(trans);
-               goto cancel;
-       }
-
-       if (test_bit(STATUS_FW_ERROR, &trans->status)) {
-               IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
-               dump_stack();
-               ret = -EIO;
-               goto cancel;
-       }
-
-       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
-               IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
-               ret = -ERFKILL;
-               goto cancel;
-       }
-
-       if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
-               IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
-               ret = -EIO;
-               goto cancel;
-       }
-
-       return 0;
-
-cancel:
-       if (cmd->flags & CMD_WANT_SKB) {
-               /*
-                * Cancel the CMD_WANT_SKB flag for the cmd in the
-                * TX cmd queue. Otherwise in case the cmd comes
-                * in later, it will possibly set an invalid
-                * address (cmd->meta.source).
-                */
-               txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
-       }
-
-       if (cmd->resp_pkt) {
-               iwl_free_resp(cmd);
-               cmd->resp_pkt = NULL;
-       }
-
-       return ret;
-}
-
-int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
-                                 struct iwl_host_cmd *cmd)
-{
-       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
-               IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
-                                 cmd->id);
-               return -ERFKILL;
-       }
-
-       if (cmd->flags & CMD_ASYNC) {
-               int ret;
-
-               /* An asynchronous command can not expect an SKB to be set. */
-               if (WARN_ON(cmd->flags & CMD_WANT_SKB))
-                       return -EINVAL;
-
-               ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
-               if (ret < 0) {
-                       IWL_ERR(trans,
-                               "Error sending %s: enqueue_hcmd failed: %d\n",
-                               iwl_get_cmd_string(trans, cmd->id), ret);
-                       return ret;
-               }
-               return 0;
-       }
-
-       return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
-}
-
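
With the gen2 sync/async wrappers deleted here (and their gen1 twins in
pcie/tx.c below), each transport exposes only its enqueue step through
.send_cmd, and the shared sync/async logic in queue/tx.c dispatches
through the ops table. An excerpt of the resulting wiring, as visible in
the hunks above and below:

	/* trans_ops_pcie:       .send_cmd = iwl_pcie_enqueue_hcmd      */
	/* trans_ops_pcie_gen2:  .send_cmd = iwl_pcie_gen2_enqueue_hcmd */

	/* and the common code calls whichever is installed: */
	cmd_idx = trans->ops->send_cmd(trans, cmd);
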
index 83f4964..2e9d411 100644 (file)
@@ -398,7 +398,7 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
        int ch, ret;
        u32 mask = 0;
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
 
        if (!iwl_trans_grab_nic_access(trans, &flags))
                goto out;
@@ -419,7 +419,7 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
        iwl_trans_release_nic_access(trans, &flags);
 
 out:
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 /*
@@ -576,7 +576,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                alloc = true;
        }
 
-       spin_lock(&trans_pcie->irq_lock);
+       spin_lock_bh(&trans_pcie->irq_lock);
 
        /* Turn off all Tx DMA fifos */
        iwl_scd_deactivate_fifos(trans);
@@ -585,7 +585,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
        iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
                           trans_pcie->kw.dma >> 4);
 
-       spin_unlock(&trans_pcie->irq_lock);
+       spin_unlock_bh(&trans_pcie->irq_lock);
 
        /* Alloc and init all Tx queues, including the command queue (#4/#9) */
        for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
@@ -914,8 +914,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
  * failed. On success, it returns the index (>= 0) of command in the
  * command queue.
  */
-static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
-                                struct iwl_host_cmd *cmd)
+int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+                         struct iwl_host_cmd *cmd)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
@@ -1249,7 +1249,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
                IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
                               iwl_get_cmd_string(trans, cmd_id));
-               wake_up(&trans_pcie->wait_command_queue);
+               wake_up(&trans->wait_command_queue);
        }
 
        meta->flags = 0;
@@ -1257,142 +1257,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        spin_unlock_bh(&txq->lock);
 }
 
-#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
-
-static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
-                                   struct iwl_host_cmd *cmd)
-{
-       int ret;
-
-       /* An asynchronous command can not expect an SKB to be set. */
-       if (WARN_ON(cmd->flags & CMD_WANT_SKB))
-               return -EINVAL;
-
-       ret = iwl_pcie_enqueue_hcmd(trans, cmd);
-       if (ret < 0) {
-               IWL_ERR(trans,
-                       "Error sending %s: enqueue_hcmd failed: %d\n",
-                       iwl_get_cmd_string(trans, cmd->id), ret);
-               return ret;
-       }
-       return 0;
-}
-
-static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
-                                  struct iwl_host_cmd *cmd)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
-       int cmd_idx;
-       int ret;
-
-       IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
-                      iwl_get_cmd_string(trans, cmd->id));
-
-       if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
-                                 &trans->status),
-                "Command %s: a command is already active!\n",
-                iwl_get_cmd_string(trans, cmd->id)))
-               return -EIO;
-
-       IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
-                      iwl_get_cmd_string(trans, cmd->id));
-
-       cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
-       if (cmd_idx < 0) {
-               ret = cmd_idx;
-               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-               IWL_ERR(trans,
-                       "Error sending %s: enqueue_hcmd failed: %d\n",
-                       iwl_get_cmd_string(trans, cmd->id), ret);
-               return ret;
-       }
-
-       ret = wait_event_timeout(trans_pcie->wait_command_queue,
-                                !test_bit(STATUS_SYNC_HCMD_ACTIVE,
-                                          &trans->status),
-                                HOST_COMPLETE_TIMEOUT);
-       if (!ret) {
-               IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
-                       iwl_get_cmd_string(trans, cmd->id),
-                       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
-               IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
-                       txq->read_ptr, txq->write_ptr);
-
-               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
-               IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
-                              iwl_get_cmd_string(trans, cmd->id));
-               ret = -ETIMEDOUT;
-
-               iwl_trans_pcie_sync_nmi(trans);
-               goto cancel;
-       }
-
-       if (test_bit(STATUS_FW_ERROR, &trans->status)) {
-               iwl_trans_pcie_dump_regs(trans);
-               IWL_ERR(trans, "FW error in SYNC CMD %s\n",
-                       iwl_get_cmd_string(trans, cmd->id));
-               dump_stack();
-               ret = -EIO;
-               goto cancel;
-       }
-
-       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
-               IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
-               ret = -ERFKILL;
-               goto cancel;
-       }
-
-       if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
-               IWL_ERR(trans, "Error: Response NULL in '%s'\n",
-                       iwl_get_cmd_string(trans, cmd->id));
-               ret = -EIO;
-               goto cancel;
-       }
-
-       return 0;
-
-cancel:
-       if (cmd->flags & CMD_WANT_SKB) {
-               /*
-                * Cancel the CMD_WANT_SKB flag for the cmd in the
-                * TX cmd queue. Otherwise in case the cmd comes
-                * in later, it will possibly set an invalid
-                * address (cmd->meta.source).
-                */
-               txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
-       }
-
-       if (cmd->resp_pkt) {
-               iwl_free_resp(cmd);
-               cmd->resp_pkt = NULL;
-       }
-
-       return ret;
-}
-
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
-{
-       /* Make sure the NIC is still alive in the bus */
-       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
-               return -ENODEV;
-
-       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
-           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
-               IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
-                                 cmd->id);
-               return -ERFKILL;
-       }
-
-       if (cmd->flags & CMD_ASYNC)
-               return iwl_pcie_send_hcmd_async(trans, cmd);
-
-       /* We still can fail on RFKILL that can be asserted while we wait */
-       return iwl_pcie_send_hcmd_sync(trans, cmd);
-}
-
 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
                             struct iwl_txq *txq, u8 hdr_len,
                             struct iwl_cmd_meta *out_meta)
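
Because the synchronous-command wait now lives in common code (queue/tx.c
below), the waitqueue it sleeps on moves from iwl_trans_pcie to iwl_trans
so both layers can see it. The two halves of the handshake, as they read
after this series:

	/* completion side, pcie/tx.c (iwl_pcie_hcmd_complete()): */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans->wait_command_queue);

	/* waiting side, queue/tx.c (iwl_trans_txq_send_hcmd_sync()): */
	ret = wait_event_timeout(trans->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
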
index 7ff1bb0..3cac83b 100644 (file)
@@ -1722,3 +1722,132 @@ next_queue:
        }
 }
 
+#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
+
+static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
+                                       struct iwl_host_cmd *cmd)
+{
+       const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+       struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+       int cmd_idx;
+       int ret;
+
+       IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+       if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+                                 &trans->status),
+                "Command %s: a command is already active!\n", cmd_str))
+               return -EIO;
+
+       IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+       cmd_idx = trans->ops->send_cmd(trans, cmd);
+       if (cmd_idx < 0) {
+               ret = cmd_idx;
+               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+               IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+                       cmd_str, ret);
+               return ret;
+       }
+
+       ret = wait_event_timeout(trans->wait_command_queue,
+                                !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+                                          &trans->status),
+                                HOST_COMPLETE_TIMEOUT);
+       if (!ret) {
+               IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+                       cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+               IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+                       txq->read_ptr, txq->write_ptr);
+
+               clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+               IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+                              cmd_str);
+               ret = -ETIMEDOUT;
+
+               iwl_trans_sync_nmi(trans);
+               goto cancel;
+       }
+
+       if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+               IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+               dump_stack();
+               ret = -EIO;
+               goto cancel;
+       }
+
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+               IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+               ret = -ERFKILL;
+               goto cancel;
+       }
+
+       if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+               IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+               ret = -EIO;
+               goto cancel;
+       }
+
+       return 0;
+
+cancel:
+       if (cmd->flags & CMD_WANT_SKB) {
+               /*
+                * Cancel the CMD_WANT_SKB flag for the cmd in the
+                * TX cmd queue. Otherwise, if the response comes
+                * in later, it could set an invalid
+                * address (cmd->meta.source).
+                */
+               txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+       }
+
+       if (cmd->resp_pkt) {
+               iwl_free_resp(cmd);
+               cmd->resp_pkt = NULL;
+       }
+
+       return ret;
+}
+
+int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
+                           struct iwl_host_cmd *cmd)
+{
+       /* Make sure the NIC is still alive in the bus */
+       if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+               return -ENODEV;
+
+       if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+           test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+               IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+                                 cmd->id);
+               return -ERFKILL;
+       }
+
+       if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+                    !(cmd->flags & CMD_SEND_IN_D3))) {
+               IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
+               return -EHOSTDOWN;
+       }
+
+       if (cmd->flags & CMD_ASYNC) {
+               int ret;
+
+               /* An asynchronous command can not expect an SKB to be set. */
+               if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+                       return -EINVAL;
+
+               ret = trans->ops->send_cmd(trans, cmd);
+               if (ret < 0) {
+                       IWL_ERR(trans,
+                               "Error sending %s: enqueue_hcmd failed: %d\n",
+                               iwl_get_cmd_string(trans, cmd->id), ret);
+                       return ret;
+               }
+               return 0;
+       }
+
+       return iwl_trans_txq_send_hcmd_sync(trans, cmd);
+}
+
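
A hypothetical caller sketch for the unified entry point added above; the
command id is illustrative only (ECHO_CMD as a simple example), and flags
of zero selects the synchronous path, which blocks on wait_command_queue
as shown earlier:

	struct iwl_host_cmd hcmd = {
		.id = ECHO_CMD,		/* example id, assumption */
		.flags = 0,		/* synchronous: wait for completion */
	};
	int ret = iwl_trans_txq_send_hcmd(trans, &hcmd);

	if (ret)
		IWL_ERR(trans, "example command failed: %d\n", ret);
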
index cff694c..af1dbdf 100644 (file)
@@ -181,4 +181,5 @@ void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
                                bool freeze);
 void iwl_txq_progress(struct iwl_txq *txq);
 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 #endif /* __iwl_trans_queue_tx_h__ */