Merge tag 'mac80211-next-for-davem-2015-08-14' into next
author: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Sun, 16 Aug 2015 07:20:58 +0000 (10:20 +0300)
committer: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Sun, 16 Aug 2015 07:20:58 +0000 (10:20 +0300)
Another pull request for the next cycle, this time with quite
a bit of content:
 * mesh fixes/improvements from Alexis, Bob, Chun-Yeow and Jesse
 * TDLS higher bandwidth support (Arik)
 * OCB fixes from Bertold Van den Bergh
 * suspend/resume fixes from Eliad
 * dynamic SMPS support for minstrel-HT (Krishna Chaitanya)
 * VHT bitrate mask support (Lorenzo Bianconi)
 * better regulatory support for 5/10 MHz channels (Matthias May)
 * basic support for MU-MIMO to avoid the multi-vif issue (Sara Sharon)
along with a number of other cleanups.

65 files changed:
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-notif-wait.c
drivers/net/wireless/iwlwifi/iwl-notif-wait.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/tdls.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tof.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c

index c160dad..101ef31 100644 (file)
@@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+                    struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 static inline u32 iwl_tx_status_to_mac80211(u32 status)
 {
@@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                      struct ieee80211_sta *sta);
 
index 074977e..0ba3e56 100644 (file)
@@ -680,9 +680,8 @@ struct iwl_priv {
        enum ieee80211_band band;
        u8 valid_contexts;
 
-       int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd);
+       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
        struct iwl_notif_wait_data notif_wait;
 
index 1d2223d..ab45819 100644 (file)
@@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
        return need_update;
 }
 
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
 
        if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
                /* bt coex disabled */
-               return 0;
+               return;
        }
 
        IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
        /* FIXME: based on notification, adjust the prio_boost */
 
        priv->bt_ci_compliance = coex->bt_ci_compliance;
-       return 0;
 }
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
index 7acaa26..453f7c3 100644 (file)
@@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv)
                }
        }
 
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_run_init_ucode(priv);
        if (ret) {
                IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
                goto error;
        }
 
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
                u32 error_id;
        } err_info;
        struct iwl_notification_wait status_wait;
-       static const u8 status_cmd[] = {
+       static const u16 status_cmd[] = {
                REPLY_WOWLAN_GET_STATUS,
        };
        struct iwlagn_wowlan_status status_data = {};
index 3bd7c86..cef921c 100644 (file)
@@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
 /*
  * Try to switch to new modulation mode from legacy
  */
-static int rs_move_legacy_other(struct iwl_priv *priv,
-                               struct iwl_lq_sta *lq_sta,
-                               struct ieee80211_conf *conf,
-                               struct ieee80211_sta *sta,
-                               int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta,
+                                int index)
 {
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
 
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
 out:
        lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@ out:
                tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from SISO
  */
-static int rs_move_siso_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+                                 struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_conf *conf,
+                                 struct ieee80211_sta *sta, int index)
 {
        u8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
  out:
        lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                tbl->action = IWL_SISO_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
 }
 
 /*
  * Try to switch to new modulation mode from MIMO2
  */
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
 
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from MIMO3
  */
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
-
 }
 
 /*
index 5a91f5d..1517698 100644 (file)
@@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
  *
  ******************************************************************************/
 
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
                err_resp->cmd_id,
                le16_to_cpu(err_resp->bad_cmd_seq_num),
                le32_to_cpu(err_resp->error_info));
-       return 0;
 }
 
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
        struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
        if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               return 0;
+               return;
 
        if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
                rxon->channel = csa->channel;
@@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        le16_to_cpu(csa->channel));
                iwl_chswitch_done(priv, false);
        }
-       return 0;
 }
 
 
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_cmd_buffer *rxb,
-                                         struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
        if (!report->state) {
                IWL_DEBUG_11H(priv,
                        "Spectrum Measure Notification: Start\n");
-               return 0;
+               return;
        }
 
        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
-       return 0;
 }
 
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
        IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
                     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
-       return 0;
 }
 
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 __maybe_unused len = iwl_rx_packet_len(pkt);
        IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
                        "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
        iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
-       return 0;
 }
 
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
 #endif
 
        priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-       return 0;
 }
 
 /**
@@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        unsigned long stamp = jiffies;
        const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                          len, sizeof(struct iwl_bt_notif_statistics),
                          sizeof(struct iwl_notif_statistics));
                spin_unlock(&priv->statistics.lock);
-               return 0;
+               return;
        }
 
        change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                priv->lib->temperature(priv);
 
        spin_unlock(&priv->statistics.lock);
-
-       return 0;
 }
 
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
 #endif
                IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
        }
-       iwlagn_rx_statistics(priv, rxb, cmd);
-       return 0;
+
+       iwlagn_rx_statistics(priv, rxb);
 }
 
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
             test_bit(STATUS_RF_KILL_HW, &priv->status)))
                wiphy_rfkill_set_hw_state(priv->hw->wiphy,
                        test_bit(STATUS_RF_KILL_HW, &priv->status));
-       return 0;
 }
 
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_cmd_buffer *rxb)
 
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
                if (!test_bit(STATUS_SCANNING, &priv->status))
                        iwl_init_sensitivity(priv);
        }
-       return 0;
 }
 
 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
        priv->ampdu_ref++;
        memcpy(&priv->last_phy_res, pkt->data,
               sizeof(struct iwl_rx_phy_res));
-       return 0;
 }
 
 /*
@@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 }
 
 /* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+                              struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *header;
        struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        if (!priv->last_phy_res_valid) {
                IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-               return 0;
+               return;
        }
        phy_res = &priv->last_phy_res;
        amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
                                phy_res->cfg_phy_cnt);
-               return 0;
+               return;
        }
 
        if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
            !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
                IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
                                le32_to_cpu(rx_pkt_status));
-               return 0;
+               return;
        }
 
        /* This will be used in several places later */
@@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
                                    rxb, &rx_status);
-       return 0;
 }
 
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_wipan_noa_data *new_data, *old_data;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
 
        if (old_data)
                kfree_rcu(old_data, rcu_head);
-
-       return 0;
 }
 
 /**
@@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
  */
 void iwl_setup_rx_handlers(struct iwl_priv *priv)
 {
-       int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+       void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
        handlers = priv->rx_handlers;
 
@@ -1102,12 +1073,10 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
                iwlagn_bt_rx_handler_setup(priv);
 }
 
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                   struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-       int err = 0;
 
        /*
         * Do the notification wait before RX handlers so
@@ -1121,12 +1090,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
         *   rx_handlers table.  See iwl_setup_rx_handlers() */
        if (priv->rx_handlers[pkt->hdr.cmd]) {
                priv->rx_handlers_stats[pkt->hdr.cmd]++;
-               err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+               priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
        } else {
                /* No handling needed */
                IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
                             iwl_dvm_get_cmd_string(pkt->hdr.cmd),
                             pkt->hdr.cmd);
        }
-       return err;
 }
index ed50de6..85ceceb 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        __le32 old_filter = send->filter_flags;
        u8 old_dev_type = send->dev_type;
        int ret;
-       static const u8 deactivate_cmd[] = {
+       static const u16 deactivate_cmd[] = {
                REPLY_WIPAN_DEACTIVATION_COMPLETE
        };
 
index 43bef90..6481594 100644 (file)
@@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 }
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
 
        IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
 #endif
-       return 0;
 }
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->tsf_high),
                       le32_to_cpu(notif->tsf_low),
                       notif->status, notif->beacon_timer);
-
-       return 0;
 }
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->statistics[0]),
                       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
 #endif
-       return 0;
 }
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
                queue_work(priv->workqueue,
                           &priv->bt_traffic_change_work);
        }
-       return 0;
 }
 
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
index 6ec86ad..0fa67d3 100644 (file)
@@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
        return 0;
 }
 
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
-                                   struct iwl_addsta_cmd *addsta,
-                                   struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+                                    struct iwl_rx_packet *pkt)
 {
        struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
-       u8 sta_id = addsta->sta.sta_id;
-       int ret = -EIO;
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-                       pkt->hdr.flags);
-               return ret;
-       }
-
-       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-                      sta_id);
+       IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
 
        spin_lock_bh(&priv->sta_lock);
 
        switch (add_sta_resp->status) {
        case ADD_STA_SUCCESS_MSK:
                IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-               ret = iwl_sta_ucode_activate(priv, sta_id);
                break;
        case ADD_STA_NO_ROOM_IN_TABLE:
-               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-                       sta_id);
+               IWL_ERR(priv, "Adding station failed, no room in table.\n");
                break;
        case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-               IWL_ERR(priv, "Adding station %d failed, no block ack "
-                       "resource.\n", sta_id);
+               IWL_ERR(priv,
+                       "Adding station failed, no block ack resource.\n");
                break;
        case ADD_STA_MODIFY_NON_EXIST_STA:
-               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-                       sta_id);
+               IWL_ERR(priv, "Attempting to modify non-existing station\n");
                break;
        default:
                IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
                break;
        }
 
-       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-                      sta_id, priv->stations[sta_id].sta.sta.addr);
-
-       /*
-        * XXX: The MAC address in the command buffer is often changed from
-        * the original sent to the device. That is, the MAC address
-        * written to the command buffer often is not the same MAC address
-        * read from the command buffer when the command returns. This
-        * issue has not yet been resolved and this debugging is left to
-        * observe the problem.
-        */
-       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-                      addsta->sta.addr);
        spin_unlock_bh(&priv->sta_lock);
-
-       return ret;
 }
 
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-       if (!cmd)
-               return 0;
-
-       return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+       iwl_process_add_sta_resp(priv, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                .len = { sizeof(*sta), },
        };
        u8 sta_id __maybe_unused = sta->sta.sta_id;
+       struct iwl_rx_packet *pkt;
+       struct iwl_add_sta_resp *add_sta_resp;
 
        IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
@@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
        if (ret || (flags & CMD_ASYNC))
                return ret;
-       /*else the command was successfully sent in SYNC mode, need to free
-        * the reply page */
 
-       iwl_free_resp(&cmd);
+       pkt = cmd.resp_pkt;
+       add_sta_resp = (void *)pkt->data;
 
-       if (cmd.handler_status)
-               IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
-                       cmd.handler_status);
+       /* debug messages are printed in the handler */
+       if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+               spin_lock_bh(&priv->sta_lock);
+               ret = iwl_sta_ucode_activate(priv, sta_id);
+               spin_unlock_bh(&priv->sta_lock);
+       } else {
+               ret = -EIO;
+       }
 
-       return cmd.handler_status;
+       iwl_free_resp(&cmd);
+
+       return ret;
 }
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt;
        int ret;
        struct iwl_rem_sta_cmd rm_sta_cmd;
+       struct iwl_rem_sta_resp *rem_sta_resp;
 
        struct iwl_host_cmd cmd = {
                .id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-                         pkt->hdr.flags);
-               ret = -EIO;
-       }
+       rem_sta_resp = (void *)pkt->data;
 
-       if (!ret) {
-               struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
-               switch (rem_sta_resp->status) {
-               case REM_STA_SUCCESS_MSK:
-                       if (!temporary) {
-                               spin_lock_bh(&priv->sta_lock);
-                               iwl_sta_ucode_deactivate(priv, sta_id);
-                               spin_unlock_bh(&priv->sta_lock);
-                       }
-                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-                       break;
-               default:
-                       ret = -EIO;
-                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-                       break;
+       switch (rem_sta_resp->status) {
+       case REM_STA_SUCCESS_MSK:
+               if (!temporary) {
+                       spin_lock_bh(&priv->sta_lock);
+                       iwl_sta_ucode_deactivate(priv, sta_id);
+                       spin_unlock_bh(&priv->sta_lock);
                }
+               IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+               break;
        }
+
        iwl_free_resp(&cmd);
 
        return ret;
index 275df12..bddd197 100644 (file)
@@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
        }
 }
 
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                skb = __skb_dequeue(&skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
 
 /**
@@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
  * Handles block-acknowledge notification from device, which reports success
  * of frames sent via aggregation.
  */
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        if (scd_flow >= priv->cfg->base_params->num_of_queues) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
-               return 0;
+               return;
        }
 
        sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                                    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
                                    scd_flow, sta_id, tid, agg->txq_id);
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        __skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
index 5244e43..931a8e4 100644 (file)
@@ -3,6 +3,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
        const struct fw_img *fw;
        int ret;
        enum iwl_ucode_type old_type;
-       static const u8 alive_cmd[] = { REPLY_ALIVE };
+       static const u16 alive_cmd[] = { REPLY_ALIVE };
 
        fw = iwl_get_ucode_image(priv, ucode_type);
        if (WARN_ON(!fw))
@@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 calib_complete[] = {
+       static const u16 calib_complete[] = {
                CALIBRATION_RES_NOTIFICATION,
                CALIBRATION_COMPLETE_NOTIFICATION
        };
index cc35f79..fa35da4 100644 (file)
@@ -76,7 +76,7 @@
 #define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  10
+#define IWL7260_UCODE_API_MIN  12
 #define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
index 72040cd..92709ad 100644 (file)
@@ -75,7 +75,7 @@
 #define IWL8000_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  10
+#define IWL8000_UCODE_API_MIN  12
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B          "nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C          "nvmData-8000C"
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO      28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO      21
+#define MAX_TX_AGG_SIZE_8260_SDIO      40
 
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO        IEEE80211_HT_MAX_AMPDU_32K
@@ -154,6 +155,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
        .led_mode = IWL_LED_RF_STATE,                                   \
        .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,           \
        .d0i3 = true,                                                   \
+       .features = NETIF_F_RXCSUM,                                     \
        .non_shared_ant = ANT_A,                                        \
        .dccm_offset = IWL8260_DCCM_OFFSET,                             \
        .dccm_len = IWL8260_DCCM_LEN,                                   \
@@ -203,6 +205,7 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
        .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -216,6 +219,7 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+       .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
index 08c14af..939fa22 100644 (file)
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
  *     mode set
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@ struct iwl_cfg {
        bool no_power_up_nic_in_init;
        const char *default_nvm_file_B_step;
        const char *default_nvm_file_C_step;
+       netdev_features_t features;
        unsigned int max_rx_agg_size;
        bool disable_dummy_notification;
        unsigned int max_tx_agg_size;
index faa17f2..543abea 100644 (file)
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
                                 CSR_INT_BIT_HW_ERR  | \
                                 CSR_INT_BIT_FH_TX   | \
                                 CSR_INT_BIT_SW_ERR  | \
+                                CSR_INT_BIT_PAGING  | \
                                 CSR_INT_BIT_RF_KILL | \
                                 CSR_INT_BIT_SW_RX   | \
                                 CSR_INT_BIT_WAKEUP  | \
@@ -422,6 +424,7 @@ enum {
 
 /* DRAM INT TABLE */
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER        (1 << 28)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
 /*
index 04e6649..71a78ce 100644 (file)
@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_PROTO(const struct device *dev,
                 struct sk_buff *skb,
-                void *data, size_t data_len),
-       TP_ARGS(dev, skb, data, data_len),
+                u8 hdr_len, size_t data_len),
+       TP_ARGS(dev, skb, hdr_len, data_len),
        TP_STRUCT__entry(
                DEV_ENTRY
 
@@ -45,7 +45,8 @@ TRACE_EVENT(iwlwifi_dev_tx_data,
        TP_fast_assign(
                DEV_ASSIGN;
                if (iwl_trace_data(skb))
-                       memcpy(__get_dynamic_array(data), data, data_len);
+                       skb_copy_bits(skb, hdr_len,
+                                     __get_dynamic_array(data), data_len);
        ),
        TP_printk("[%s] TX frame data", __get_str(dev))
 );
index 948ce08..eb4b99a 100644 (file)
@@ -36,7 +36,7 @@
 TRACE_EVENT(iwlwifi_dev_hcmd,
        TP_PROTO(const struct device *dev,
                 struct iwl_host_cmd *cmd, u16 total_size,
-                struct iwl_cmd_header *hdr),
+                struct iwl_cmd_header_wide *hdr),
        TP_ARGS(dev, cmd, total_size, hdr),
        TP_STRUCT__entry(
                DEV_ENTRY
@@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                __field(u32, flags)
        ),
        TP_fast_assign(
-               int i, offset = sizeof(*hdr);
+               int i, offset = sizeof(struct iwl_cmd_header);
+
+               if (hdr->group_id)
+                       offset = sizeof(struct iwl_cmd_header_wide);
 
                DEV_ASSIGN;
                __entry->flags = cmd->flags;
-               memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+               memcpy(__get_dynamic_array(hcmd), hdr, offset);
 
                for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                        if (!cmd->len[i])
@@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                        offset += cmd->len[i];
                }
        ),
-       TP_printk("[%s] hcmd %#.2x (%ssync)",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+       TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+                 ((u8 *)__get_dynamic_array(hcmd))[0],
                  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
index 6685259..a86aa5b 100644 (file)
@@ -372,6 +372,30 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
        return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+                               const u32 len)
+{
+       struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+       struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+       if (len < sizeof(*fw_capa))
+               return -EINVAL;
+
+       capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+       capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+       capa->max_ap_cache_per_scan =
+               le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+       capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+       capa->max_scan_reporting_threshold =
+               le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+       capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+       capa->max_significant_change_aps =
+               le32_to_cpu(fw_capa->max_significant_change_aps);
+       capa->max_bssid_history_entries =
+               le32_to_cpu(fw_capa->max_bssid_history_entries);
+       return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        size_t len = ucode_raw->size;
        const u8 *data;
        u32 tlv_len;
+       u32 usniffer_img;
        enum iwl_ucode_tlv_type tlv_type;
        const u8 *tlv_data;
        char buildstr[25];
-       u32 build;
+       u32 build, paging_mem_size;
        int num_of_cpus;
        bool usniffer_images = false;
        bool usniffer_req = false;
+       bool gscan_capa = false;
 
        if (len < sizeof(*ucode)) {
                IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                            IWL_UCODE_REGULAR_USNIFFER,
                                            tlv_len);
                        break;
+               case IWL_UCODE_TLV_PAGING:
+                       if (tlv_len != sizeof(u32))
+                               goto invalid_tlv_len;
+                       paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+                       IWL_DEBUG_FW(drv,
+                                    "Paging: paging enabled (size = %u bytes)\n",
+                                    paging_mem_size);
+
+                       if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+                               IWL_ERR(drv,
+                                       "Paging: driver supports up to %lu bytes for paging image\n",
+                                       MAX_PAGING_IMAGE_SIZE);
+                               return -EINVAL;
+                       }
+
+                       if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+                               IWL_ERR(drv,
+                                       "Paging: image isn't multiple %lu\n",
+                                       FW_PAGING_SIZE);
+                               return -EINVAL;
+                       }
+
+                       drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+                               paging_mem_size;
+                       usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+                       drv->fw.img[usniffer_img].paging_mem_size =
+                               paging_mem_size;
+                       break;
                case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
                        if (tlv_len != sizeof(u32))
                                goto invalid_tlv_len;
                        drv->fw.sdio_adma_addr =
                                le32_to_cpup((__le32 *)tlv_data);
                        break;
+               case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+                       if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+                               goto invalid_tlv_len;
+                       gscan_capa = true;
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
@@ -979,6 +1039,16 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           WARN(!gscan_capa,
+                "GSCAN is supported but capabilities TLV is unavailable\n"))
+               __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+                           capa->_capa);
+
        return 0;
 
  invalid_tlv_len:
index e57dbd0..af5b320 100644 (file)
@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *     Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *     &struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
        /* 0 is deprecated */
@@ -97,6 +99,7 @@ enum iwl_fw_error_dump_type {
        IWL_FW_ERROR_DUMP_FH_REGS = 8,
        IWL_FW_ERROR_DUMP_MEM = 9,
        IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+       IWL_FW_ERROR_DUMP_RB = 11,
 
        IWL_FW_ERROR_DUMP_MAX,
 };
@@ -222,6 +225,20 @@ struct iwl_fw_error_dump_mem {
        u8 data[];
 };
 
+/**
+ * struct iwl_fw_error_dump_rb - content of an Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved:
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+       __le32 index;
+       __le32 rxq;
+       __le32 reserved;
+       u8 data[];
+};
+
 /**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
index a9b5ae4..75809ab 100644 (file)
@@ -132,12 +132,14 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_API_CHANGES_SET   = 29,
        IWL_UCODE_TLV_ENABLED_CAPABILITIES      = 30,
        IWL_UCODE_TLV_N_SCAN_CHANNELS           = 31,
+       IWL_UCODE_TLV_PAGING            = 32,
        IWL_UCODE_TLV_SEC_RT_USNIFFER   = 34,
        IWL_UCODE_TLV_SDIO_ADMA_ADDR    = 35,
        IWL_UCODE_TLV_FW_VERSION        = 36,
        IWL_UCODE_TLV_FW_DBG_DEST       = 38,
        IWL_UCODE_TLV_FW_DBG_CONF       = 39,
        IWL_UCODE_TLV_FW_DBG_TRIGGER    = 40,
+       IWL_UCODE_TLV_FW_GSCAN_CAPA     = 50,
 };
 
 struct iwl_ucode_tlv {
@@ -247,9 +249,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- *     regardless of the band or the number of the probes. FW will calculate
- *     the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
  *     through the dedicated host command.
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -266,7 +266,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
        IWL_UCODE_TLV_API_HDC_PHASE_0           = (__force iwl_ucode_tlv_api_t)10,
        IWL_UCODE_TLV_API_TX_POWER_DEV          = (__force iwl_ucode_tlv_api_t)11,
-       IWL_UCODE_TLV_API_BASIC_DWELL           = (__force iwl_ucode_tlv_api_t)13,
+       IWL_UCODE_TLV_API_WIDE_CMD_HDR          = (__force iwl_ucode_tlv_api_t)14,
        IWL_UCODE_TLV_API_SCD_CFG               = (__force iwl_ucode_tlv_api_t)15,
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = (__force iwl_ucode_tlv_api_t)16,
        IWL_UCODE_TLV_API_ASYNC_DTM             = (__force iwl_ucode_tlv_api_t)17,
@@ -284,6 +284,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
  * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *     tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +299,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -305,12 +307,14 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  *     IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *     is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)1,
        IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = (__force iwl_ucode_tlv_capa_t)2,
        IWL_UCODE_TLV_CAPA_BEAMFORMER                   = (__force iwl_ucode_tlv_capa_t)3,
+       IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)6,
        IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = (__force iwl_ucode_tlv_capa_t)8,
        IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = (__force iwl_ucode_tlv_capa_t)9,
@@ -320,10 +324,12 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
        IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
+       IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
+       IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT                = (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -341,8 +347,9 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION       0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)   (((ver) & 0xFF000000) >> 24)
@@ -412,6 +419,12 @@ enum iwl_fw_dbg_reg_operator {
        PRPH_ASSIGN,
        PRPH_SETBIT,
        PRPH_CLEARBIT,
+
+       INDIRECT_ASSIGN,
+       INDIRECT_SETBIT,
+       INDIRECT_CLEARBIT,
+
+       PRPH_BLOCKBIT,
 };
 
 /**
@@ -485,10 +498,13 @@ struct iwl_fw_dbg_conf_hcmd {
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *     collect only monitor data
  */
 enum iwl_fw_dbg_trigger_mode {
        IWL_FW_DBG_TRIGGER_START = BIT(0),
        IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+       IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -718,4 +734,28 @@ struct iwl_fw_dbg_conf_tlv {
        struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_fw_gscan_capabilities {
+       __le32 max_scan_cache_size;
+       __le32 max_scan_buckets;
+       __le32 max_ap_cache_per_scan;
+       __le32 max_rssi_sample_size;
+       __le32 max_scan_reporting_threshold;
+       __le32 max_hotlist_aps;
+       __le32 max_significant_change_aps;
+       __le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
index 3e3c9d8..45e7321 100644 (file)
@@ -133,6 +133,7 @@ struct fw_desc {
 struct fw_img {
        struct fw_desc sec[IWL_UCODE_SECTION_MAX];
        bool is_dual_cpus;
+       u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@ struct iwl_sf_region {
        u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS        0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+       dma_addr_t fw_paging_phys;
+       struct page *fw_paging_block;
+       u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -150,6 +193,30 @@ struct iwl_fw_cscheme_list {
        struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
+/**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold. in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *     change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *     hold.
+ */
+struct iwl_gscan_capabilities {
+       u32 max_scan_cache_size;
+       u32 max_scan_buckets;
+       u32 max_ap_cache_per_scan;
+       u32 max_rssi_sample_size;
+       u32 max_scan_reporting_threshold;
+       u32 max_hotlist_aps;
+       u32 max_significant_change_aps;
+       u32 max_bssid_history_entries;
+};
+
 /**
  * struct iwl_fw - variables associated with the firmware
  *
@@ -208,6 +275,7 @@ struct iwl_fw {
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
+       struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
index b5bc959..6caf2af 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
                                continue;
 
                        for (i = 0; i < w->n_cmds; i++) {
-                               if (w->cmds[i] == pkt->hdr.cmd) {
+                               if (w->cmds[i] ==
+                                   WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
                                        found = true;
                                        break;
                                }
@@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_wait,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data)
@@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
        wait_entry->fn = fn;
        wait_entry->fn_data = fn_data;
        wait_entry->n_cmds = n_cmds;
-       memcpy(wait_entry->cmds, cmds, n_cmds);
+       memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
        wait_entry->triggered = false;
        wait_entry->aborted = false;
 
index 95af97a..dbe8234 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@ struct iwl_notification_wait {
                   struct iwl_rx_packet *pkt, void *data);
        void *fn_data;
 
-       u8 cmds[MAX_NOTIF_CMDS];
+       u16 cmds[MAX_NOTIF_CMDS];
        u8 n_cmds;
        bool triggered, aborted;
 };
@@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_data,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data);
index 80fefe7..3b8e85e 100644 (file)
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
-               /* The byte order is little endian 16 bit, meaning 214365 */
-               data->hw_addr[0] = hw_addr[1];
-               data->hw_addr[1] = hw_addr[0];
-               data->hw_addr[2] = hw_addr[3];
-               data->hw_addr[3] = hw_addr[2];
-               data->hw_addr[4] = hw_addr[5];
-               data->hw_addr[5] = hw_addr[4];
+               /*
+                * Store the MAC address from MAO section.
+                * No byte swapping is required in MAO section
+                */
+               memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
index ce1cdd7..71b450a 100644 (file)
@@ -148,8 +148,7 @@ struct iwl_op_mode_ops {
                                     const struct iwl_fw *fw,
                                     struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
-       int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb);
        void (*napi_add)(struct iwl_op_mode *op_mode,
                         struct napi_struct *napi,
                         struct net_device *napi_dev,
@@ -188,11 +187,10 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
        op_mode->ops->stop(op_mode);
 }
 
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
-       return op_mode->ops->rx(op_mode, rxb, cmd);
+       return op_mode->ops->rx(op_mode, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
index 5af1c77..3ab777f 100644 (file)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS     (16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK     (0x007F0000)
 #define SCD_GP_CTRL_ENABLE_31_QUEUES           BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE           BIT(18)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND    (SCD_MEM_LOWER_BOUND + 0x600)
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* tcp checksum offload */
+#define RX_EN_CSUM             (0x00a00d88)
+
 /* Oscillator clock */
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
@@ -379,6 +383,8 @@ enum aux_misc_master1_en {
 #define AUX_MISC_MASTER1_SMPHR_STATUS  0xA20800
 #define RSA_ENABLE                     0xA24B08
 #define PREG_AUX_BUS_WPROT_0           0xA04CC0
+#define SB_CPU_1_STATUS                        0xA01E30
+#define SB_CPU_2_STATUS                        0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK                     0xA01FF8
@@ -386,4 +392,10 @@ enum {
        LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW paging-pass notification */
+#define LMPM_PAGE_PASS_NOTIF                   0xA03824
+enum {
+       LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif                         /* __iwl_prph_h__ */
index 87a230a..c829c50 100644 (file)
 #define INDEX_TO_SEQ(i)        ((i) & 0xff)
 #define SEQ_RX_FRAME   cpu_to_le16(0x8000)
 
+/*
+ * These functions retrieve specific information from
+ * the id field in the iwl_host_cmd struct, which contains
+ * the command id, the group id and the version of the command,
+ * and vice versa.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+       return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+       return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+       return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+       return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP  1
+
 /**
  * struct iwl_cmd_header
  *
  */
 struct iwl_cmd_header {
        u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
-       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+       u8 group_id;
        /*
         * The driver sets up the sequence number to values of its choosing.
         * uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@ struct iwl_cmd_header {
        __le16 sequence;
 } __packed;
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * this is the wide version that contains more information about the command
+ * like length, version and command type
+ */
+struct iwl_cmd_header_wide {
+       u8 cmd;
+       u8 group_id;
+       __le16 sequence;
+       __le16 length;
+       u8 reserved;
+       u8 version;
+} __packed;
 
 #define FH_RSCSR_FRAME_SIZE_MSK                0x00003FFF      /* bits 0-13 */
 #define FH_RSCSR_FRAME_INVALID         0x55550000
@@ -201,6 +248,8 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *     (i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *     check that we leave enough room for the TBs bitmap which needs 20 bits.
  */
 enum CMD_MODE {
        CMD_ASYNC               = BIT(0),
@@ -210,6 +259,8 @@ enum CMD_MODE {
        CMD_SEND_IN_IDLE        = BIT(4),
        CMD_MAKE_TRANS_IDLE     = BIT(5),
        CMD_WAKE_UP_TRANS       = BIT(6),
+
+       CMD_TB_BITMAP_POS       = 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -222,8 +273,18 @@ enum CMD_MODE {
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-       struct iwl_cmd_header hdr;      /* uCode API */
-       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       union {
+               struct {
+                       struct iwl_cmd_header hdr;      /* uCode API */
+                       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+               };
+               struct {
+                       struct iwl_cmd_header_wide hdr_wide;
+                       u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+                                       sizeof(struct iwl_cmd_header_wide) +
+                                       sizeof(struct iwl_cmd_header)];
+               };
+       };
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -261,24 +322,22 @@ enum iwl_hcmd_dataflag {
  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  * @_rx_page_order: (internally used to free response packet)
  * @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
  * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ *     version and group as well
  */
 struct iwl_host_cmd {
        const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;
-       int handler_status;
 
        u32 flags;
+       u32 id;
        u16 len[IWL_MAX_CMD_TBS_PER_TFD];
        u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
-       u8 id;
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +438,7 @@ enum iwl_trans_status {
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *     in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
  * @command_names: array of command names, must be 256 entries
  *     (one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +456,7 @@ struct iwl_trans_config {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        const char *const *command_names;
 
        u32 sdio_adma_addr;
@@ -544,10 +605,12 @@ struct iwl_trans_ops {
                              u32 value);
        void (*ref)(struct iwl_trans *trans);
        void (*unref)(struct iwl_trans *trans);
-       void (*suspend)(struct iwl_trans *trans);
+       int  (*suspend)(struct iwl_trans *trans);
        void (*resume)(struct iwl_trans *trans);
 
-       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+       struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+                                                struct iwl_fw_dbg_trigger_tlv
+                                                *trigger);
 };
 
 /**
@@ -584,6 +647,8 @@ enum iwl_d0i3_mode {
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *     0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -603,6 +668,12 @@ enum iwl_d0i3_mode {
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location were the FW will upload / download the pages
+ *     from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging data base, the pointer is set by
+ *     the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *     downloading them to the FW. The buffer is allocated in the opmode
  */
 struct iwl_trans {
        const struct iwl_trans_ops *ops;
@@ -612,6 +683,7 @@ struct iwl_trans {
        unsigned long status;
 
        struct device *dev;
+       u32 max_skb_frags;
        u32 hw_rev;
        u32 hw_id;
        char hw_id_str[52];
@@ -639,6 +711,14 @@ struct iwl_trans {
        struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
        u8 dbg_dest_reg_num;
 
+       /*
+        * Paging parameters - All of the parameters should be set by the
+        * opmode when paging is enabled
+        */
+       u32 paging_req_addr;
+       struct iwl_fw_paging *paging_db;
+       void *paging_download_buf;
+
        enum iwl_d0i3_mode d0i3_mode;
 
        bool wowlan_d0i3;
@@ -730,7 +810,8 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
        might_sleep();
-       trans->ops->d3_suspend(trans, test);
+       if (trans->ops->d3_suspend)
+               trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -738,6 +819,9 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      bool test)
 {
        might_sleep();
+       if (!trans->ops->d3_resume)
+               return 0;
+
        return trans->ops->d3_resume(trans, status, test);
 }
 
@@ -753,10 +837,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
                trans->ops->unref(trans);
 }
 
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
 {
-       if (trans->ops->suspend)
-               trans->ops->suspend(trans);
+       if (!trans->ops->suspend)
+               return 0;
+
+       return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans *trans)
@@ -766,11 +852,12 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+                   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        if (!trans->ops->dump_data)
                return NULL;
-       return trans->ops->dump_data(trans);
+       return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
index 2d7c3ea..8c2c3d1 100644 (file)
@@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
index b4737e2..e290ac6 100644 (file)
@@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
        }
 }
 
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+               return;
+       }
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+               return;
+       }
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
@@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(cmd.corun_lut40));
 
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
-                                   sizeof(cmd), &cmd);
+       if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+                                sizeof(cmd), &cmd))
+               IWL_ERR(mvm,
+                       "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
 }
index 6ac6de2..61c07b0 100644 (file)
@@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
        u8 __maybe_unused lower_bound, upper_bound;
-       int ret;
        u8 lut;
 
        struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        };
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
-               return 0;
+               return;
        cmd.data[0] = bt_cmd;
 
        bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(bt_cmd->bt4_corun_lut40));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (iwl_mvm_send_cmd(mvm, &cmd))
+               IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
 
        kfree(bt_cmd);
-       return ret;
 }
index beba375..b8ee312 100644 (file)
 #define IWL_MVM_QUOTA_THRESHOLD                        4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO            0
+#define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index 4165d10..04264e4 100644 (file)
@@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_d3;
-       static const u8 d3_notif[] = { D3_CONFIG_CMD };
+       static const u16 d3_notif[] = { D3_CONFIG_CMD };
        int ret;
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@ remove_notif:
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       ret = iwl_trans_suspend(mvm->trans);
+       if (ret)
+               return ret;
 
-       iwl_trans_suspend(mvm->trans);
        mvm->trans->wowlan_d0i3 = wowlan->any;
        if (mvm->trans->wowlan_d0i3) {
                /* 'any' trigger means d0i3 usage */
                if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_enter_d0i3_sync(mvm);
+                       ret = iwl_mvm_enter_d0i3_sync(mvm);
 
                        if (ret)
                                return ret;
@@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                mutex_lock(&mvm->d0i3_suspend_mutex);
                __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
                mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+               iwl_trans_d3_suspend(mvm->trans, false);
+
                return 0;
        }
 
@@ -1935,28 +1942,59 @@ out:
        return 1;
 }
 
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       iwl_trans_resume(mvm->trans);
+
+       return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+       bool exit_now;
+       enum iwl_d3_status d3_status;
+
+       iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+       /*
+        * make sure to clear D0I3_DEFER_WAKEUP before
+        * calling iwl_trans_resume(), which might wait
+        * for d0i3 exit completion.
+        */
+       mutex_lock(&mvm->d0i3_suspend_mutex);
+       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+                                       &mvm->d0i3_suspend_flags);
+       mutex_unlock(&mvm->d0i3_suspend_mutex);
+       if (exit_now) {
+               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+               _iwl_mvm_exit_d0i3(mvm);
+       }
 
        iwl_trans_resume(mvm->trans);
 
-       if (mvm->hw->wiphy->wowlan_config->any) {
-               /* 'any' trigger means d0i3 usage */
-               if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_exit_d0i3(hw->priv);
+       if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+               int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
 
-                       if (ret)
-                               return ret;
-                       /*
-                        * d0i3 exit will be deferred until reconfig_complete.
-                        * make sure there we are out of d0i3.
-                        */
-               }
-               return 0;
+               if (ret)
+                       return ret;
+               /*
+                * d0i3 exit will be deferred until reconfig_complete.
+                * make sure there we are out of d0i3.
+                */
        }
+       return 0;
+}
 
-       return __iwl_mvm_resume(mvm, false);
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       /* 'any' trigger means d0i3 was used */
+       if (hw->wiphy->wowlan_config->any)
+               return iwl_mvm_resume_d0i3(mvm);
+       else
+               return iwl_mvm_resume_d3(mvm);
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
index 5c8a65d..ddb1c84 100644 (file)
@@ -63,6 +63,7 @@
  *
  *****************************************************************************/
 #include "mvm.h"
+#include "fw-api-tof.h"
 #include "debugfs.h"
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
/*
 * If @buf starts with the literal prefix @name, return a pointer to the
 * first character after the prefix (the value part); otherwise NULL.
 */
static inline char *iwl_dbgfs_is_match(char *name, char *buf)
{
	size_t prefix_len = strlen(name);

	if (strncmp(name, buf, prefix_len))
		return NULL;

	return buf + prefix_len;
}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+                                         char *buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = -EINVAL;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tof_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.tof_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.one_sided_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_debug_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_buf=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_buf_required = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_config_cmd(mvm);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.tof_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+                        cmd->tof_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+                        cmd->one_sided_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+                        cmd->is_debug_mode);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+                        cmd->is_buf_required);
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+                                                   char *buf,
+                                                   size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("burst_period=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (!ret)
+                       mvm->tof_data.responder_cfg.burst_period =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("burst_duration=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.burst_duration = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("abort_responder=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.abort_responder = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("get_ch_est=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.get_ch_est = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("channel_num=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.channel_num = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bandwidth=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.bandwidth = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("rate=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.rate = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bssid=", buf);
+       if (data) {
+               u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("toa_offset=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.toa_offset =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_per_burst = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("asap_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.asap_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+                                                  char __user *user_buf,
+                                                  size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_responder_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.responder_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+                        le16_to_cpu(cmd->burst_period));
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+                        cmd->burst_duration);
+       pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+                        cmd->bandwidth);
+       pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+                        cmd->channel_num);
+       pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+                        cmd->ctrl_ch_position);
+       pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+                        cmd->bssid);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+                        cmd->num_of_burst_exp);
+       pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+       pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+                        cmd->abort_responder);
+       pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+                        cmd->get_ch_est);
+       pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+                        cmd->recv_sta_req_params);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+                        cmd->ftm_per_burst);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+                        cmd->ftm_resp_ts_avail);
+       pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+                        cmd->asap_mode);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msecs = %d\n",
+                        le16_to_cpu(cmd->tsf_timer_offset_msecs));
+       pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+                        le16_to_cpu(cmd->toa_offset));
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+                                                char *buf, size_t count,
+                                                loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("request_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.request_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("initiator=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.initiator = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.one_sided_los_disable = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("req_timeout=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.req_timeout = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("report_policy=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.report_policy = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_random=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.macaddr_random = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_ap=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.num_of_ap = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_template=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("ap=", buf);
+       if (data) {
+               struct iwl_tof_range_req_ap_entry ap;
+               int size = sizeof(struct iwl_tof_range_req_ap_entry);
+               u16 burst_period;
+               u8 *mac = ap.bssid;
+               int i;
+
+               if (sscanf(data, "%d %hhd %hhx %hhx"
+                          "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+                          "%hhx %hhx %hx"
+                          "%hhx %hhx %x"
+                          "%hhx %hhx %hhx %hhx",
+                          &i, &ap.channel_num, &ap.bandwidth,
+                          &ap.ctrl_ch_position,
+                          mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+                          &ap.measure_type, &ap.num_of_bursts,
+                          &burst_period,
+                          &ap.samples_per_burst, &ap.retries_per_sample,
+                          &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+                          &ap.enable_dyn_ack, &ap.rssi) != 20) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               if (i > IWL_MVM_TOF_MAX_APS) {
+                       IWL_ERR(mvm, "Invalid AP index %d\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               ap.burst_period = cpu_to_le16(burst_period);
+
+               memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_request=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_cmd *cmd;
+       int i;
+
+       cmd = &mvm->tof_data.range_req;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+                        cmd->initiator);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+                        cmd->one_sided_los_disable);
+       pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+                        cmd->req_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+                        cmd->report_policy);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+                        cmd->macaddr_random);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+                        cmd->macaddr_template);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+                        cmd->macaddr_mask);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+                        cmd->num_of_ap);
+       for (i = 0; i < cmd->num_of_ap; i++) {
+               struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: channel_num=%hhx bw=%hhx"
+                               " control=%hhx bssid=%pM type=%hhx"
+                               " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+                               " retries=%hhx tsf_delta=%x location_req=%hhx "
+                               " asap=%hhx enable=%hhx rssi=%hhx\n",
+                               i, ap->channel_num, ap->bandwidth,
+                               ap->ctrl_ch_position, ap->bssid,
+                               ap->measure_type, ap->num_of_bursts,
+                               ap->burst_period, ap->samples_per_burst,
+                               ap->retries_per_sample, ap->tsf_delta,
+                               ap->location_req, ap->asap_mode,
+                               ap->enable_dyn_ack, ap->rssi);
+       }
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+                                                char *buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_ext_cmd *cmd;
+
+       cmd = &mvm->tof_data.range_req_ext;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msec = %hx\n",
+                        cmd->tsf_timer_offset_msec);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw20M = %hhx\n",
+                        cmd->ftm_format_and_bw20M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw40M = %hhx\n",
+                        cmd->ftm_format_and_bw40M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw80M = %hhx\n",
+                        cmd->ftm_format_and_bw80M);
+
+       mutex_unlock(&mvm->mutex);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+                                              char *buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       int abort_id;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("abort_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.last_abort_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_abort=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       abort_id = mvm->tof_data.last_abort_id;
+                       ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[32];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       int last_abort_id;
+
+       mutex_lock(&mvm->mutex);
+       last_abort_id = mvm->tof_data.last_abort_id;
+       mutex_unlock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+                        last_abort_id);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+                                                char __user *user_buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char *buf;
+       int pos = 0;
+       const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+       struct iwl_tof_range_rsp_ntfy *cmd;
+       int i, ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+       cmd = &mvm->tof_data.range_resp;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+                        cmd->request_status);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+                        cmd->last_in_batch);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+                        cmd->num_of_aps);
+       for (i = 0; i < cmd->num_of_aps; i++) {
+               struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+                               " rtt=%x rtt_var=%x rtt_spread=%x"
+                               " rssi=%hhx  rssi_spread=%hhx"
+                               " range=%x range_var=%x"
+                               " time_stamp=%x\n",
+                               i, ap->bssid, ap->measure_status,
+                               ap->measure_bw,
+                               ap->rtt, ap->rtt_variance, ap->rtt_spread,
+                               ap->rssi, ap->rssi_spread, ap->range,
+                               ap->range_variance, ap->timestamp);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
                                           size_t count, loff_t *ppos)
 {
@@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
                                         S_IRUSR | S_IWUSR);
 
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+           !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+               if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+                       MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+                                                mvmvif->dbgfs_dir,
+                                                S_IRUSR | S_IWUSR);
+
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+                                        S_IRUSR);
+       }
+
        /*
         * Create symlink for convenience pointing to interface specific
         * debugfs entries for the driver. For example, under
index ffb4b5c..17d7a05 100644 (file)
@@ -974,7 +974,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+       iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
index b1baa33..b86b169 100644 (file)
@@ -413,7 +413,7 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
index 5e4cbdb..660cc1c 100644 (file)
@@ -87,41 +87,6 @@ struct iwl_ssid_ie {
        u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK:  scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- *     (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- *     asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
-       SCAN_COMP_STATUS_OK = 0x1,
-       SCAN_COMP_STATUS_ABORT = 0x2,
-       SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-       SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-       SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-       SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-       SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-       SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-       SCAN_COMP_STATUS_ERR_COEX = 0x9,
-       SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-       SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-       SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN   16
@@ -143,71 +108,6 @@ enum scan_framework_client {
        SCAN_CLIENT_ASSET_TRACKING      = BIT(2),
 };
 
-/**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags:                see enum iwl_scan_flags
- * @channel_count:     channels in channel list
- * @quiet_time:                dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th:     quiet channel num of packets threshold
- * @good_CRC_th:       passive to active promotion threshold
- * @rx_chain:          RXON rx chain.
- * @max_out_time:      max TUs to be out of associated channel
- * @suspend_time:      pause scan this TUs when returning to service channel
- * @flags:             RXON flags
- * @filter_flags:      RXONfilter
- * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan:       list of SSIDs for directed active scan
- * @scan_type:         see enum iwl_scan_type.
- * @rep_count:         repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 good_CRC_th;
-       __le16 rx_chain;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd[2];
-       /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 scan_type;
-       __le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
-       IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE         = BIT(0),
-       IWL_SCAN_OFFLOAD_CHANNEL_NARROW         = BIT(22),
-       IWL_SCAN_OFFLOAD_CHANNEL_FULL           = BIT(24),
-       IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type:        bitmap; bits 1-20 are for directed scan to i'th ssid and
- *     see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd:          scan command fixed part
- * @data:              scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
-       struct iwl_scan_offload_cmd scan_cmd;
-       u8 data[0];
-} __packed;
-
 /**
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:              MAC address to filter out
@@ -297,35 +197,6 @@ enum iwl_scan_ebs_status {
        IWL_SCAN_EBS_INACTIVE,
 };
 
-/**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line:                last schedule line executed (fast or regular)
- * @last_schedule_iteration:   last scan iteration executed before scan abort
- * @status:                    enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
-       u8 last_schedule_line;
-       u8 last_schedule_iteration;
-       u8 status;
-       u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap:       SSIDs indexes found in this iteration
- * @client_bitmap:     clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
-       __le16 ssid_bitmap;
-       u8 client_bitmap;
-       u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
 /**
  * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
  * @tx_flags: combination of TX_CMD_FLG_*
@@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete {
 
 /* UMAC Scan API */
 
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size:      size of the command (not including header)
- * @reserved0: for future use and alignment
- * @ver:       API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
-       __le16 size;
-       u8 reserved0;
-       u8 ver;
-} __packed;
-
 /* The maximum of either of these cannot exceed 8, because we use an
  * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
  */
@@ -621,7 +480,6 @@ enum iwl_channel_flags {
 
 /**
  * struct iwl_scan_config
- * @hdr: umac command header
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@ enum iwl_channel_flags {
  * @channel_array:             default supported channels
  */
 struct iwl_scan_config {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -660,7 +517,8 @@ struct iwl_scan_config {
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
  *     can be preempted by other scan requests with higher priority.
- *     The low priority scan is aborted.
+ *     The low priority scan will be resumed when the higher priority scan is
+ *     completed.
  *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
  *     when scan starts.
  */
@@ -734,7 +592,6 @@ struct iwl_scan_req_umac_tail {
 
 /**
  * struct iwl_scan_req_umac
- * @hdr: umac command header
  * @flags: &enum iwl_umac_scan_flags
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -753,7 +610,6 @@ struct iwl_scan_req_umac_tail {
  *     &struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 uid;
        __le32 ooc_priority;
@@ -775,12 +631,10 @@ struct iwl_scan_req_umac {
 
 /**
  * struct iwl_umac_scan_abort
- * @hdr: umac command header
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 uid;
        __le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644 (file)
index 0000000..eed6271
--- /dev/null
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+       TOF_RANGE_REQ_CMD = 0x1,
+       TOF_CONFIG_CMD = 0x2,
+       TOF_RANGE_ABORT_CMD = 0x3,
+       TOF_RANGE_REQ_EXT_CMD = 0x4,
+       TOF_RESPONDER_CONFIG_CMD = 0x5,
+       TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+       TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+       TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+       TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+       TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+       TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 enabled, 1 - disabled
+ * @one_sided_disabled: 0 enabled, 1 - disabled
+ * @is_debug_mode: 1 debug mode, 0 - otherwise
+ * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 tof_disabled;
+       u8 one_sided_disabled;
+       u8 is_debug_mode;
+       u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *               The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *                The minimum delay between two sequential FTM Responses
+ *                in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *                The total time for all FTMs handshake in the same burst.
+ *                Affect the time events duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *                The number of bursts for the current ToF request. Affect
+ *                the number of events allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *                  (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *                      params and use the recommended Initiator params.
+ *                      0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *          the center frequency.
+ *          40MHz  0 below center, 1 above center
+ *          80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *                              1  the far  20MHz to the center
+ *                 bit[2]  as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *               '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *             purposes, simulating station movement by adding various values
+ *             to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 burst_period;
+       u8 min_delta_ftm;
+       u8 burst_duration;
+       u8 num_of_burst_exp;
+       u8 get_ch_est;
+       u8 abort_responder;
+       u8 recv_sta_req_params;
+       u8 channel_num;
+       u8 bandwidth;
+       u8 rate;
+       u8 ctrl_ch_position;
+       u8 ftm_per_burst;
+       u8 ftm_resp_ts_avail;
+       u8 asap_mode;
+       u8 sta_id;
+       __le16 tsf_timer_offset_msecs;
+       __le16 toa_offset;
+       u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *                in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *                     value be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *                     value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *                     value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 tsf_timer_offset_msec;
+       __le16 reserved;
+       u8 min_delta_ftm;
+       u8 ftm_format_and_bw20M;
+       u8 ftm_format_and_bw40M;
+       u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *          center frequency.
+ *          40MHz  0 below center, 1 above center
+ *          80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *                              1  the far  20MHz to the center
+ *                 bit[2]  as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP.  2s Exponent of the
+ *                number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *               periodicity In units of 100ms. ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ *                    1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *                     in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *            The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ *                           Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *         0  Initiator interact with regular AP
+ *         1  Initiator interact with Responder machine: need to send the
+ *         Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *         use it for its ch est measurement (this flag will be set when we
+ *         configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *       legal values: -128-0 (0x7f). above 0x0 indicating an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+       u8 channel_num;
+       u8 bandwidth;
+       u8 tsf_delta_direction;
+       u8 ctrl_ch_position;
+       u8 bssid[ETH_ALEN];
+       u8 measure_type;
+       u8 num_of_bursts;
+       __le16 burst_period;
+       u8 samples_per_burst;
+       u8 retries_per_sample;
+       __le32 tsf_delta;
+       u8 location_req;
+       u8 asap_mode;
+       u8 enable_dyn_ack;
+       s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *                           possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *                              timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *                               earlier of: measurements completion / timeout
+ *                               expiration.
+ */
+enum iwl_tof_response_mode {
+       IWL_MVM_TOF_RESPOSE_ASAP = 1,
+       IWL_MVM_TOF_RESPOSE_TIMEOUT,
+       IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *             sent back in the range response
+ * @initiator: 0- NW initiated,  1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ *                        '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *          This is equivalent to the session time configured to the
+ *          LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: For current release -
+ *                the range report will be uploaded as a batch when ready or
+ *                when the session is done (successfully / partially).
+ *                one of iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *                 '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *               Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 initiator;
+       u8 one_sided_los_disable;
+       u8 req_timeout;
+       u8 report_policy;
+       u8 los_det_disable;
+       u8 num_of_ap;
+       u8 macaddr_random;
+       u8 macaddr_template[ETH_ALEN];
+       u8 macaddr_mask[ETH_ALEN];
+       struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *      current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *            values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *             measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *            uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+       u8 bssid[ETH_ALEN];
+       u8 measure_status;
+       u8 measure_bw;
+       __le32 rtt;
+       __le32 rtt_variance;
+       __le32 rtt_spread;
+       s8 rssi;
+       u8 rssi_spread;
+       __le16 reserved;
+       __le32 range;
+       __le32 range_variance;
+       __le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy -
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+       u8 request_id;
+       u8 request_status;
+       u8 last_in_batch;
+       u8 num_of_aps;
+       struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+       u8 token;
+       u8 role;
+       __le16 reserved;
+       u8 initiator_bssid[ETH_ALEN];
+       u8 responder_bssid[ETH_ALEN];
+       u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report_notif
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+       u8 bssid[ETH_ALEN];
+       u8 request_token;
+       u8 status;
+       __le16 report_ie_len;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 reserved[3];
+} __packed;
+
+#endif
index 81c4ea3..853698a 100644 (file)
@@ -124,6 +124,18 @@ enum iwl_tx_flags {
        TX_CMD_FLG_HCCA_CHUNK           = BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwl_tx_pm_timeouts {
+       PM_FRAME_NONE           = 0,
+       PM_FRAME_MGMT           = 2,
+       PM_FRAME_ASSOC          = 3,
+};
+
 /*
  * TX command security control
  */
index 16e9ef4..4af7513 100644 (file)
@@ -75,6 +75,7 @@
 #include "fw-api-coex.h"
 #include "fw-api-scan.h"
 #include "fw-api-stats.h"
+#include "fw-api-tof.h"
 
 /* Tx queue numbers */
 enum {
@@ -119,6 +120,9 @@ enum {
        ADD_STA = 0x18,
        REMOVE_STA = 0x19,
 
+       /* paging get item */
+       FW_GET_ITEM_CMD = 0x1a,
+
        /* TX */
        TX_CMD = 0x1c,
        TXPATH_FLUSH = 0x1e,
@@ -148,6 +152,9 @@ enum {
 
        LQ_CMD = 0x4e,
 
+       /* paging block to FW cpu2 */
+       FW_PAGING_BLOCK_CMD = 0x4f,
+
        /* Scan offload */
        SCAN_OFFLOAD_REQUEST_CMD = 0x51,
        SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -163,6 +170,10 @@ enum {
        CALIB_RES_NOTIF_PHY_DB = 0x6b,
        /* PHY_DB_CMD = 0x6c, */
 
+       /* ToF - 802.11mc FTM */
+       TOF_CMD = 0x10,
+       TOF_NOTIFICATION = 0x11,
+
        /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
        PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -365,6 +376,50 @@ struct iwl_nvm_access_cmd {
        u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS        33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwl_fw_paging_cmd {
+       __le32 flags;
+       __le32 block_size;
+       __le32 block_num;
+       __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * Fw items ID's
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload
+ *     download
+ */
+enum iwl_fw_item_id {
+       IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+       __le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+       __le32 item_id;
+       __le32 item_byte_cnt;
+       __le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section
@@ -1080,10 +1135,33 @@ struct iwl_rx_phy_info {
        __le16 frame_time;
 } __packed;
 
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+       CSUM_RXA_RESERVED_MASK  = 0x000f,
+       CSUM_RXA_MICSIZE_MASK   = 0x00f0,
+       CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+       CSUM_RXA_PADD           = BIT(13),
+       CSUM_RXA_AMSDU          = BIT(14),
+       CSUM_RXA_ENA            = BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see CSUM_RX_ASSIST_ above
+ */
 struct iwl_rx_mpdu_res_start {
        __le16 byte_count;
-       __le16 reserved;
-} __packed;
+       __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
 
 /**
  * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1214,8 @@ enum iwl_rx_phy_flags {
  * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
  * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
  * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
  * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
  * @RX_MPDU_RES_STATUS_STA_ID_MSK:
  * @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1245,8 @@ enum iwl_mvm_rx_status {
        RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP               = BIT(13),
        RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT               = BIT(14),
        RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME             = BIT(15),
+       RX_MPDU_RES_STATUS_CSUM_DONE                    = BIT(16),
+       RX_MPDU_RES_STATUS_CSUM_OK                      = BIT(17),
        RX_MPDU_RES_STATUS_HASH_INDEX_MSK               = (0x3F0000),
        RX_MPDU_RES_STATUS_STA_ID_MSK                   = (0x1f000000),
        RX_MPDU_RES_STATUS_RRF_KILL                     = BIT(29),
index eb10c5e..4a0ce83 100644 (file)
@@ -106,6 +106,306 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+       int i;
+
+       if (!mvm->fw_paging_db[0].fw_paging_block)
+               return;
+
+       for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+               if (!mvm->fw_paging_db[i].fw_paging_block) {
+                       IWL_DEBUG_FW(mvm,
+                                    "Paging: block %d already freed, continue to next page\n",
+                                    i);
+
+                       continue;
+               }
+
+               __free_pages(mvm->fw_paging_db[i].fw_paging_block,
+                            get_order(mvm->fw_paging_db[i].fw_paging_size));
+       }
+       kfree(mvm->trans->paging_download_buf);
+       memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+       int sec_idx, idx;
+       u32 offset = 0;
+
+       /*
+        * find where is the paging image start point:
+        * if CPU2 exists and is in paging format, then the image looks like:
+        * CPU1 sections (2 or more)
+        * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+        * CPU2 sections (not paged)
+        * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
+        * non-paged sections from the CPU2 paging section
+        * CPU2 paging CSS
+        * CPU2 paging image (including instruction and data)
+        */
+       for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+               if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+                       sec_idx++;
+                       break;
+               }
+       }
+
+       if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+               IWL_ERR(mvm, "driver didn't find paging image\n");
+               iwl_free_fw_paging(mvm);
+               return -EINVAL;
+       }
+
+       /* copy the CSS block to the dram */
+       IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+                    sec_idx);
+
+       memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+              image->sec[sec_idx].data,
+              mvm->fw_paging_db[0].fw_paging_size);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: copied %d CSS bytes to first block\n",
+                    mvm->fw_paging_db[0].fw_paging_size);
+
+       sec_idx++;
+
+       /*
+        * copy the paging blocks to the dram
+        * the loop index starts from 1 since the CSS block was already copied
+        * to dram and the CSS index is 0.
+        * the loop stops at num_of_paging_blk since the last block is not full.
+        */
+       for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      mvm->fw_paging_db[idx].fw_paging_size);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d paging bytes to block %d\n",
+                            mvm->fw_paging_db[idx].fw_paging_size,
+                            idx);
+
+               offset += mvm->fw_paging_db[idx].fw_paging_size;
+       }
+
+       /* copy the last paging block */
+       if (mvm->num_of_pages_in_last_blk > 0) {
+               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+                      image->sec[sec_idx].data + offset,
+                      FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: copied %d pages in the last block %d\n",
+                            mvm->num_of_pages_in_last_blk, idx);
+       }
+
+       return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+                                  const struct fw_img *image)
+{
+       struct page *block;
+       dma_addr_t phys = 0;
+       int blk_idx = 0;
+       int order, num_of_pages;
+       int dma_enabled;
+
+       if (mvm->fw_paging_db[0].fw_paging_block)
+               return 0;
+
+       dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+       /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
+       BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+       num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+       mvm->num_of_paging_blk = ((num_of_pages - 1) /
+                                   NUM_OF_PAGE_PER_GROUP) + 1;
+
+       mvm->num_of_pages_in_last_blk =
+               num_of_pages -
+               NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+                    mvm->num_of_paging_blk,
+                    mvm->num_of_pages_in_last_blk);
+
+       /* allocate block of 4Kbytes for paging CSS */
+       order = get_order(FW_PAGING_SIZE);
+       block = alloc_pages(GFP_KERNEL, order);
+       if (!block) {
+               /* free all the previous pages since we failed */
+               iwl_free_fw_paging(mvm);
+               return -ENOMEM;
+       }
+
+       mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+       mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+       if (dma_enabled) {
+               phys = dma_map_page(mvm->trans->dev, block, 0,
+                                   PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(mvm->trans->dev, phys)) {
+                       /*
+                        * free the previous pages and the current one since
+                        * we failed to map_page.
+                        */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+       } else {
+               mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+                       blk_idx << BLOCK_2_EXP_SIZE;
+       }
+
+       IWL_DEBUG_FW(mvm,
+                    "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+                    order);
+
+       /*
+        * allocate blocks in dram.
+        * since the CSS is allocated in fw_paging_db[0], start the loop at index 1
+        */
+       for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               /* allocate block of PAGING_BLOCK_SIZE (32K) */
+               order = get_order(PAGING_BLOCK_SIZE);
+               block = alloc_pages(GFP_KERNEL, order);
+               if (!block) {
+                       /* free all the previous pages since we failed */
+                       iwl_free_fw_paging(mvm);
+                       return -ENOMEM;
+               }
+
+               mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+               mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+               if (dma_enabled) {
+                       phys = dma_map_page(mvm->trans->dev, block, 0,
+                                           PAGE_SIZE << order,
+                                           DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(mvm->trans->dev, phys)) {
+                               /*
+                                * free the previous pages and the current one
+                                * since we failed to map_page.
+                                */
+                               iwl_free_fw_paging(mvm);
+                               return -ENOMEM;
+                       }
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+               } else {
+                       mvm->fw_paging_db[blk_idx].fw_paging_phys =
+                               PAGING_ADDR_SIG |
+                               blk_idx << BLOCK_2_EXP_SIZE;
+               }
+
+               IWL_DEBUG_FW(mvm,
+                            "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+                            order);
+       }
+
+       return 0;
+}
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+                             const struct fw_img *fw)
+{
+       int ret;
+
+       ret = iwl_alloc_fw_paging_mem(mvm, fw);
+       if (ret)
+               return ret;
+
+       return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+       int blk_idx;
+       __le32 dev_phy_addr;
+       struct iwl_fw_paging_cmd fw_paging_cmd = {
+               .flags =
+                       cpu_to_le32(PAGING_CMD_IS_SECURED |
+                                   PAGING_CMD_IS_ENABLED |
+                                   (mvm->num_of_pages_in_last_blk <<
+                                   PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+               .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+               .block_num = cpu_to_le32(mvm->num_of_paging_blk),
+       };
+
+       /* loop over all paging blocks + CSS block */
+       for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               dev_phy_addr =
+                       cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+                                   PAGE_2_EXP_SIZE);
+               fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Send paging item cmd to FW in case CPU2 has paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+       int ret;
+       struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+               .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+       };
+
+       struct iwl_fw_get_item_resp *item_resp;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+               .data = { &fw_get_item_cmd, },
+       };
+
+       cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (ret) {
+               IWL_ERR(mvm,
+                       "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+                       ret);
+               return ret;
+       }
+
+       item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+       if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+               IWL_ERR(mvm,
+                       "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+                       le32_to_cpu(item_resp->item_id));
+               ret = -EIO;
+               goto exit;
+       }
+
+       mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+                                                 GFP_KERNEL);
+       if (!mvm->trans->paging_download_buf) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+       mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+       mvm->trans->paging_db = mvm->fw_paging_db;
+       IWL_DEBUG_FW(mvm,
+                    "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+                    mvm->trans->paging_req_addr);
+
+exit:
+       iwl_free_resp(&cmd);
+
+       return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
                         struct iwl_rx_packet *pkt, void *data)
 {
@@ -213,7 +513,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
-       static const u8 alive_cmd[] = { MVM_ALIVE };
+       static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;
 
        if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
                                    MVM_UCODE_ALIVE_TIMEOUT);
        if (ret) {
+               if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+                       IWL_ERR(mvm,
+                               "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+                               iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+                               iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
                mvm->cur_ucode = old_type;
                return ret;
        }
@@ -268,6 +573,40 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
+       /*
+        * configure and operate fw paging mechanism.
+        * driver configures the paging flow only once, CPU2 paging image
+        * included in the IWL_UCODE_INIT image.
+        */
+       if (fw->paging_mem_size) {
+               /*
+                * When dma is not enabled, the driver needs to copy / write
+                * the downloaded / uploaded page to / from the smem.
+                * This gets the location of the place where the pages are
+                * stored.
+                */
+               if (!is_device_dma_capable(mvm->trans->dev)) {
+                       ret = iwl_trans_get_paging_item(mvm);
+                       if (ret) {
+                               IWL_ERR(mvm, "failed to get FW paging item\n");
+                               return ret;
+                       }
+               }
+
+               ret = iwl_save_fw_paging(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to save the FW paging image\n");
+                       return ret;
+               }
+
+               ret = iwl_send_paging_cmd(mvm, fw);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to send the paging cmd\n");
+                       iwl_free_fw_paging(mvm);
+                       return ret;
+               }
+       }
+
        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
@@ -314,7 +653,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 init_complete[] = {
+       static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
@@ -444,12 +783,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
-
        mem_cfg = (void *)pkt->data;
 
        mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,14 +806,18 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                le32_to_cpu(mem_cfg->page_buff_size);
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-exit:
        iwl_free_resp(&cmd);
 }
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay)
+                               struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+       unsigned int delay = 0;
+
+       if (trigger)
+               delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
        if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
                return -EBUSY;
 
@@ -491,6 +828,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                 le32_to_cpu(desc->trig_desc.type));
 
        mvm->fw_dump_desc = desc;
+       mvm->fw_dump_trig = trigger;
 
        queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
@@ -498,7 +836,8 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay)
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_mvm_dump_desc *desc;
 
@@ -510,14 +849,13 @@ int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
        desc->trig_desc.type = cpu_to_le32(trig);
        memcpy(desc->trig_desc.data, str, len);
 
-       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+       return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
                                const char *fmt, ...)
 {
-       unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
        u16 occurrences = le16_to_cpu(trigger->occurrences);
        int ret, len = 0;
        char buf[64];
@@ -541,8 +879,9 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                len = strlen(buf) + 1;
        }
 
-       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-                                    len, delay);
+       ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+                                    trigger);
+
        if (ret)
                return ret;
 
@@ -676,8 +1015,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
-               iwl_mvm_get_shared_mem_conf(mvm);
+       iwl_mvm_get_shared_mem_conf(mvm);
 
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
@@ -760,6 +1098,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
+       if (iwl_mvm_is_csum_supported(mvm) &&
+           mvm->cfg->features & NETIF_F_RXCSUM)
+               iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +1157,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +1169,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                          (flags & SW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & CT_KILL_CARD_DISABLED) ?
                          "Reached" : "Not reached");
-
-       return 0;
 }
 
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +1183,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
-       return 0;
 }
index 1812dd0..3424315 100644 (file)
@@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
        }
 }
 
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                        RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                }
        }
-
-       return 0;
 }
 
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
                iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                                   IEEE80211_IFACE_ITER_NORMAL,
                                                   iwl_mvm_beacon_loss_iterator,
                                                   mb);
-       return 0;
 }
index dfdab38..08dd674 100644 (file)
@@ -649,6 +649,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        }
 
+       hw->netdev_features |= mvm->cfg->features;
+       if (!iwl_mvm_is_csum_supported(mvm))
+               hw->netdev_features &= ~NETIF_F_RXCSUM;
+
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
                iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1124,14 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        u32 file_len, fifo_data_len = 0;
        u32 smem_len = mvm->cfg->smem_len;
        u32 sram2_len = mvm->cfg->dccm2_len;
+       bool monitor_dump_only = false;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (mvm->fw_dump_trig &&
+           mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+               monitor_dump_only = true;
+
        fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
        if (!fw_error_dump)
                return;
@@ -1174,6 +1183,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                   fifo_data_len +
                   sizeof(*dump_info);
 
+       /* Make room for the SMEM, if it exists */
+       if (smem_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+       /* Make room for the secondary SRAM, if it exists */
+       if (sram2_len)
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+       /* If we only want a monitor dump, reset the file length */
+       if (monitor_dump_only) {
+               file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+                          sizeof(*dump_info);
+       }
+
        /*
         * In 8000 HW family B-step include the ICCM (which resides separately)
         */
@@ -1186,14 +1209,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
-       /* Make room for the SMEM, if it exists */
-       if (smem_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-       /* Make room for the secondary SRAM, if it exists */
-       if (sram2_len)
-               file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
        dump_file = vzalloc(file_len);
        if (!dump_file) {
                kfree(fw_error_dump);
@@ -1239,6 +1254,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
+       /* In case we only want a monitor dump, skip to dumping transport data */
+       if (monitor_dump_only)
+               goto dump_trans_data;
+
        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
        dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
        dump_mem = (void *)dump_data->data;
@@ -1282,7 +1301,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                                         dump_mem->data, IWL8260_ICCM_LEN);
        }
 
-       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+       fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+                                                      mvm->fw_dump_trig);
        fw_error_dump->op_mode_len = file_len;
        if (fw_error_dump->trans_ptr)
                file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1312,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
                      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+       mvm->fw_dump_trig = NULL;
        clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1433,22 +1455,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
 {
-       bool exit_now;
-
        if (!iwl_mvm_is_d0i3_supported(mvm))
                return;
 
-       mutex_lock(&mvm->d0i3_suspend_mutex);
-       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-                                       &mvm->d0i3_suspend_flags);
-       mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-       if (exit_now) {
-               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-               _iwl_mvm_exit_d0i3(mvm);
-       }
-
        if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
                if (!wait_event_timeout(mvm->d0i3_exit_waitq,
                                        !test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1664,6 +1673,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                goto out_unlock;
        }
 
+       mvmvif->features |= hw->netdev_features;
+
        ret = iwl_mvm_mac_ctxt_add(mvm, vif);
        if (ret)
                goto out_release;
@@ -2880,10 +2891,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               /* fall-through */
-       case WLAN_CIPHER_SUITE_CCMP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
                break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
                break;
@@ -3025,7 +3037,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
        int res, time_reg = DEVICE_SYSTEM_TIME_REG;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
-       static const u8 time_event_response[] = { HOT_SPOT_CMD };
+       static const u16 time_event_response[] = { HOT_SPOT_CMD };
        struct iwl_notification_wait wait_time_event;
        struct iwl_hs20_roc_req aux_roc_req = {
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
index 605f57a..58544f2 100644 (file)
@@ -80,6 +80,7 @@
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
+#include "tof.h"
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          5
@@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
  *     be up'ed after the INIT fw asserted. This is useful to be able to use
  *     proprietary tools over testmode to debug the INIT fw.
  * @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- *     Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
  */
 struct iwl_mvm_mod_params {
        bool init_dbg;
@@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data {
  *     # of received beacons accumulated over FW restart, and the current
  *     average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
  */
 struct iwl_mvm_vif {
        struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@ struct iwl_mvm_vif {
        /* Indicates that CSA countdown may be started */
        bool csa_countdown;
        bool csa_failed;
+
+       /* TCP Checksum Offload */
+       netdev_features_t features;
 };
 
 static inline struct iwl_mvm_vif *
@@ -607,6 +611,11 @@ struct iwl_mvm {
        /* NVM sections */
        struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
+       /* Paging section */
+       struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+       u16 num_of_paging_blk;
+       u16 num_of_pages_in_last_blk;
+
        /* EEPROM MAC addresses */
        struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
@@ -687,6 +696,7 @@ struct iwl_mvm {
         * can hold 16 keys at most. Reflect this fact.
         */
        unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+       u8 fw_key_deleted[STA_KEY_MAX_NUM];
 
        /* references taken by the driver and spinlock protecting them */
        spinlock_t refs_lock;
@@ -699,6 +709,7 @@ struct iwl_mvm {
        u8 fw_dbg_conf;
        struct delayed_work fw_dump_wk;
        struct iwl_mvm_dump_desc *fw_dump_desc;
+       struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 #ifdef CONFIG_IWLWIFI_LEDS
        struct led_classdev led;
@@ -823,6 +834,7 @@ struct iwl_mvm {
        struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
 
        u32 ciphers[6];
+       struct iwl_mvm_tof_data tof_data;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -942,6 +954,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_RRC;
 }
 
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -975,12 +993,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
                                  struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                                      u32 flags, u16 len, const void *data);
 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
                                         struct iwl_host_cmd *cmd,
                                         u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -989,10 +1007,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                            struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, __le16 fc);
@@ -1004,6 +1018,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+                                          struct iwl_tx_cmd *tx_cmd)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+       memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 {
        flush_work(&mvm->async_handlers_wk);
@@ -1012,9 +1037,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 /* Statistics */
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                                  struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
@@ -1060,27 +1084,19 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1107,12 +1123,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1136,29 +1150,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             struct cfg80211_sched_scan_request *req,
                             struct ieee80211_scan_ies *ies,
                             int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1197,9 +1206,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                 char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 #ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1255,9 +1263,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
 /* BT Coex */
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1275,9 +1282,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                               enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1286,9 +1292,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
                                         struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
                                        enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1377,9 +1382,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+                       struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1391,9 +1395,8 @@ struct iwl_mcc_update_resp *
 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                   enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb);
 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                                  const char *alpha2,
                                                  enum iwl_mcc_source src_id,
@@ -1432,8 +1435,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
 
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@@ -1443,10 +1445,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-                          const char *str, size_t len, unsigned int delay);
+                          const char *str, size_t len,
+                          struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
                                struct iwl_mvm_dump_desc *desc,
-                               unsigned int delay);
+                               struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
                                struct iwl_fw_dbg_trigger_tlv *trigger,
index 2a6be35..328187d 100644 (file)
@@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                return ERR_PTR(ret);
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract MCC response */
        mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        return retval;
 }
 
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
-               return 0;
+               return;
 
        mcc[0] = notif->mcc >> 8;
        mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
                      mcc, src);
        regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
        if (IS_ERR_OR_NULL(regd))
-               return 0;
+               return;
 
        regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
        kfree(regd);
-
-       return 0;
 }
index 3967df6..e7f6c01 100644 (file)
@@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
 }
 
 struct iwl_rx_handlers {
-       u8 cmd_id;
+       u16 cmd_id;
        bool async;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _async)       \
        { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)        \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
 
 /*
  * Handlers for fw notifications
@@ -221,7 +222,6 @@ struct iwl_rx_handlers {
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
        RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,9 +261,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+       RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 
 };
 #undef RX_HANDLER
+#undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
 static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
@@ -286,8 +288,10 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
+       CMD(FW_PAGING_BLOCK_CMD),
        CMD(ADD_STA_KEY),
        CMD(ADD_STA),
+       CMD(FW_GET_ITEM_CMD),
        CMD(REMOVE_STA),
        CMD(LQ_CMD),
        CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -470,6 +474,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
        trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+       trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+                                              IWL_UCODE_TLV_API_WIDE_CMD_HDR);
 
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
                trans_cfg.bc_table_dword = true;
@@ -576,6 +582,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        /* rpm starts with a taken ref. only set the appropriate bit here. */
        mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
+       iwl_mvm_tof_init(mvm);
+
        return op_mode;
 
  out_unregister:
@@ -623,14 +631,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
+       iwl_mvm_tof_clean(mvm);
+
        ieee80211_free_hw(mvm->hw);
 }
 
 struct iwl_async_handler_entry {
        struct list_head list;
        struct iwl_rx_cmd_buffer rxb;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +676,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
-               if (entry->fn(mvm, &entry->rxb, NULL))
-                       IWL_WARN(mvm,
-                                "returned value from ASYNC handlers are ignored\n");
+               entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
                kfree(entry);
@@ -698,24 +705,29 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                if (!cmds_trig->cmds[i].cmd_id)
                        break;
 
-               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+                   cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
                        continue;
 
                iwl_mvm_fw_dbg_collect_trig(mvm, trig,
-                                           "CMD 0x%02x received",
-                                           pkt->hdr.cmd);
+                                           "CMD 0x%02x.%02x received",
+                                           pkt->hdr.group_id, pkt->hdr.cmd);
                break;
        }
 }
 
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;
 
+       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+               iwl_mvm_rx_rx_mpdu(mvm, rxb);
+               return;
+       }
+
        iwl_mvm_rx_check_trigger(mvm, pkt);
 
        /*
@@ -729,16 +741,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
                struct iwl_async_handler_entry *entry;
 
-               if (rx_h->cmd_id != pkt->hdr.cmd)
+               if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                        continue;
 
-               if (!rx_h->async)
-                       return rx_h->fn(mvm, rxb, cmd);
+               if (!rx_h->async) {
+                       rx_h->fn(mvm, rxb);
+                       return;
+               }
 
                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                /* we can't do much... */
                if (!entry)
-                       return 0;
+                       return;
 
                entry->rxb._page = rxb_steal_page(rxb);
                entry->rxb._offset = rxb->_offset;
@@ -750,8 +764,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                schedule_work(&mvm->async_handlers_wk);
                break;
        }
-
-       return 0;
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
@@ -903,7 +915,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
         * can't recover this since we're already half suspended.
         */
        if (!mvm->restart_fw && fw_error) {
-               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+               iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+                                           NULL);
        } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
                                    &mvm->status)) {
                struct iwl_mvm_reprobe *reprobe;
index d2c6ba9..c4e0890 100644 (file)
@@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 static
 void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_beacon_filter_cmd *cmd)
+                                         struct iwl_beacon_filter_cmd *cmd,
+                                         bool d0i3)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       if (vif->bss_conf.cqm_rssi_thold) {
+       if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
                cmd->bf_energy_delta =
                        cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
                /* fw uses an absolute value for this */
@@ -287,27 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
        return true;
 }
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-       int numerator;
-       int dtim_interval = dtimper * bi;
-
-       if (WARN_ON(!dtim_interval))
-               return 0;
-
-       if (dtimper == 1) {
-               if (bi > 100)
-                       numerator = 408;
-               else
-                       numerator = 510;
-       } else if (dtimper < 10) {
-               numerator = 612;
-       } else {
-               return 0;
-       }
-       return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
@@ -377,11 +357,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        if (!radar_detect && (dtimper < 10) &&
            (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
             mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-               cmd->skip_dtim_periods =
-                       iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-               if (cmd->skip_dtim_periods)
-                       cmd->flags |=
-                               cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+               cmd->skip_dtim_periods = 3;
        }
 
        if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
@@ -509,9 +486,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
                       ETH_ALEN);
 }
 
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +496,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
-       return 0;
 }
 
 struct iwl_power_vifs {
@@ -810,7 +784,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
        if (!d0i3)
                iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
index daff1d0..19a7926 100644 (file)
@@ -2403,7 +2403,7 @@ struct rs_init_rate_info {
        u8 rate_idx;
 };
 
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2416,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
        { S8_MIN, IWL_RATE_1M_INDEX },
 };
 
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2427,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
        { S8_MIN, IWL_RATE_6M_INDEX },
 };
 
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+       { -60, IWL_RATE_MCS_7_INDEX },
+       { -64, IWL_RATE_MCS_6_INDEX },
+       { -68, IWL_RATE_MCS_5_INDEX },
+       { -72, IWL_RATE_MCS_4_INDEX },
+       { -80, IWL_RATE_MCS_3_INDEX },
+       { -84, IWL_RATE_MCS_2_INDEX },
+       { -85, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+       { -60, IWL_RATE_MCS_8_INDEX },
+       { -64, IWL_RATE_MCS_7_INDEX },
+       { -68, IWL_RATE_MCS_6_INDEX },
+       { -72, IWL_RATE_MCS_5_INDEX },
+       { -80, IWL_RATE_MCS_4_INDEX },
+       { -84, IWL_RATE_MCS_3_INDEX },
+       { -85, IWL_RATE_MCS_2_INDEX },
+       { -87, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+       { -60, IWL_RATE_MCS_9_INDEX },
+       { -64, IWL_RATE_MCS_8_INDEX },
+       { -68, IWL_RATE_MCS_7_INDEX },
+       { -72, IWL_RATE_MCS_6_INDEX },
+       { -80, IWL_RATE_MCS_5_INDEX },
+       { -84, IWL_RATE_MCS_4_INDEX },
+       { -85, IWL_RATE_MCS_3_INDEX },
+       { -87, IWL_RATE_MCS_2_INDEX },
+       { -88, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+                                struct ieee80211_sta *sta,
+                                struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+
+       if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+       else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+       else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               rate->type = LQ_LEGACY_A;
+       else
+               rate->type = LQ_LEGACY_G;
+
+       rate->bw = rs_bw_from_sta_bw(sta);
+       rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+       /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+       if (is_mimo(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+       } else if (is_siso(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+       } else {
+               lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+               if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+                       lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+               }
+       }
+
+       if (is_vht(rate)) {
+               if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+               }
+       } else if (is_ht(rate)) {
+               lq_sta->optimal_rates = rs_optimal_rates_ht;
+               lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+       }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+                                          struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+       int i;
+
+       rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+                                    BITS_PER_LONG);
+
+       for (i = 0; i < lq_sta->optimal_nentries; i++) {
+               int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+               if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+                   (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+                       rate->index = rate_idx;
+                       break;
+               }
+       }
+
+       rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+       return rate;
+}
+
 /* Choose an initial legacy rate and antenna to use based on the RSSI
  * of last Rx
  */
@@ -2468,12 +2586,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 
        if (band == IEEE80211_BAND_5GHZ) {
                rate->type = LQ_LEGACY_A;
-               initial_rates = rs_init_rates_5ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+               initial_rates = rs_optimal_rates_5ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
        } else {
                rate->type = LQ_LEGACY_G;
-               initial_rates = rs_init_rates_24ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+               initial_rates = rs_optimal_rates_24ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
        }
 
        if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2614,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
                         struct iwl_lq_sta *lq_sta,
                         struct ieee80211_rx_status *rx_status)
 {
+       int i;
+
        lq_sta->pers.chains = rx_status->chains;
        lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
        lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
        lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+       lq_sta->pers.last_rssi = S8_MIN;
+
+       for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+               if (!(lq_sta->pers.chains & BIT(i)))
+                       continue;
+
+               if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+                       lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+       }
 }
 
 /**
@@ -2538,6 +2667,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        rate = &tbl->rate;
 
        rs_get_initial_rate(mvm, lq_sta, band, rate);
+       rs_init_optimal_rate(mvm, sta, lq_sta);
 
        WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
        if (rate->ant == ANT_A)
@@ -2560,6 +2690,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct rs_rate *optimal_rate;
+       u32 last_ucode_rate;
 
        if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
                /* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2715,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 
        iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
                                  info->band, &info->control.rates[0]);
-
        info->control.rates[0].count = 1;
+
+       /* Report the optimal rate based on rssi and STA caps if we haven't
+        * converged yet (too little traffic) or exploring other modulations
+        */
+       if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+               optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+               last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+                                                         optimal_rate);
+               iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+                                         &txrc->reported_rate);
+       }
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2747,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 #endif
        lq_sta->pers.chains = 0;
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+       lq_sta->pers.last_rssi = S8_MIN;
 
        return &sta_priv->lq_sta;
 }
index 2a3da31..81314ad 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@ struct iwl_lq_sta {
        u8 max_siso_rate_idx;
        u8 max_mimo2_rate_idx;
 
+       /* Optimal rate based on RSSI and STA caps.
+        * Used only to reflect link speed to userspace.
+        */
+       struct rs_rate optimal_rate;
+       unsigned long optimal_rate_mask;
+       const struct rs_init_rate_info *optimal_rates;
+       int optimal_nentries;
+
        u8 missed_rate_counter;
 
        struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@ struct iwl_lq_sta {
 #endif
                u8 chains;
                s8 chain_signal[IEEE80211_MAX_CHAINS];
+               s8 last_rssi;
                struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
                struct iwl_mvm *drv;
        } pers;
index 9ff0b43..a0c27cc 100644 (file)
@@ -61,6 +61,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include <linux/skbuff.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "fw-api.h"
@@ -71,8 +72,7 @@
  * Copies the phy information in mvm->last_phy_info, it will be used when the
  * actual data will come from the fw in the next packet.
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                spin_unlock(&mvm->drv_stats_lock);
        }
 #endif
-
-       return 0;
 }
 
 /*
@@ -237,13 +235,25 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
        return 0;
 }
 
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+                           struct sk_buff *skb,
+                           u32 status)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+       if (mvmvif->features & NETIF_F_RXCSUM &&
+           status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+           status & RX_MPDU_RES_STATUS_CSUM_OK)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 /*
  * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
  *
  * Handles the actual data of the Rx packet from the fw
  */
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_rx_status *rx_status;
@@ -271,7 +281,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
-               return 0;
+               return;
        }
 
        rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +294,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
                               rx_pkt_status);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
                               phy_info->cfg_phy_cnt);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        /*
@@ -366,6 +376,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                }
        }
 
+       if (sta && ieee80211_is_data(hdr->frame_control))
+               iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
        rcu_read_unlock();
 
        /* set the preamble flag if appropriate */
@@ -431,7 +444,6 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 #endif
        iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
                                        crypt_len, rxb);
-       return 0;
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +635,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                iwl_rx_packet_payload_len(pkt));
 }
 
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
-       return 0;
 }
index 5de1449..1505546 100644 (file)
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-struct iwl_mvm_scan_params {
-       u32 max_out_time;
+enum iwl_mvm_scan_type {
+       IWL_SCAN_TYPE_UNASSOC,
+       IWL_SCAN_TYPE_WILD,
+       IWL_SCAN_TYPE_MILD,
+       IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+       IWL_MVM_TRAFFIC_LOW,
+       IWL_MVM_TRAFFIC_MEDIUM,
+       IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+       u32 dwell_active;
+       u32 dwell_passive;
+       u32 dwell_fragmented;
        u32 suspend_time;
-       bool passive_fragmented;
+       u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+       [IWL_SCAN_TYPE_UNASSOC] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 0,
+               .max_out_time = 0,
+       },
+       [IWL_SCAN_TYPE_WILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 30,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_MILD] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 120,
+               .max_out_time = 120,
+       },
+       [IWL_SCAN_TYPE_FRAGMENTED] = {
+               .dwell_active = 10,
+               .dwell_passive = 110,
+               .dwell_fragmented = 44,
+               .suspend_time = 95,
+               .max_out_time = 44,
+       },
+};
+
+struct iwl_mvm_scan_params {
+       enum iwl_mvm_scan_type type;
        u32 n_channels;
        u16 delay;
        int n_ssids;
@@ -90,15 +140,7 @@ struct iwl_mvm_scan_params {
        int n_match_sets;
        struct iwl_scan_probe_req preq;
        struct cfg80211_match_set *match_sets;
-       struct _dwell {
-               u16 passive;
-               u16 active;
-               u16 fragmented;
-       } dwell[IEEE80211_NUM_BANDS];
-       struct {
-               u8 iterations;
-               u8 full_scan_mul; /* not used for UMAC */
-       } schedule[2];
+       u8 iterations[2];
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -147,34 +189,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band, int n_ssids)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-               return 10;
-       if (band == IEEE80211_BAND_2GHZ)
-               return 20  + 3 * (n_ssids + 1);
-       return 10  + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-                                    enum ieee80211_band band)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-                       return 110;
-       return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
@@ -186,90 +200,39 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                *global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+       return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       struct iwl_mvm_scan_params *params)
 {
        int global_cnt = 0;
-       enum ieee80211_band band;
-       u8 frag_passive_dwell = 0;
+       enum iwl_mvm_traffic_load load;
+       bool low_latency;
 
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_cnt);
        if (!global_cnt)
-               goto not_bound;
-
-       params->suspend_time = 30;
-       params->max_out_time = 120;
-
-       if (iwl_mvm_low_latency(mvm)) {
-               if (fw_has_api(&mvm->fw->ucode_capa,
-                              IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-
-                       params->suspend_time = 105;
-                       /*
-                        * If there is more than one active interface make
-                        * passive scan more fragmented.
-                        */
-                       frag_passive_dwell = 40;
-                       params->max_out_time = frag_passive_dwell;
-               } else {
-                       params->suspend_time = 120;
-                       params->max_out_time = 120;
-               }
-       }
+               return IWL_SCAN_TYPE_UNASSOC;
 
-       if (frag_passive_dwell &&
-           fw_has_api(&mvm->fw->ucode_capa,
-                      IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-               /*
-                * P2P device scan should not be fragmented to avoid negative
-                * impact on P2P device discovery. Configure max_out_time to be
-                * equal to dwell time on passive channel. Take a longest
-                * possible value, one that corresponds to 2GHz band
-                */
-               if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(mvm,
-                                                         IEEE80211_BAND_2GHZ);
-                       params->max_out_time = passive_dwell;
-               } else {
-                       params->passive_fragmented = true;
-               }
-       }
-
-       if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-           (params->max_out_time > 200))
-               params->max_out_time = 200;
+       load = iwl_mvm_get_traffic_load(mvm);
+       low_latency = iwl_mvm_low_latency(mvm);
 
-not_bound:
+       if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+           vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+           fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+               return IWL_SCAN_TYPE_FRAGMENTED;
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].fragmented = frag_passive_dwell;
-
-               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-                                                                       band);
-               params->dwell[band].active =
-                       iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-       }
+       if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+               return IWL_SCAN_TYPE_MILD;
 
-       IWL_DEBUG_SCAN(mvm,
-                      "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-                      params->max_out_time, params->suspend_time,
-                      params->passive_fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_2GHZ].passive,
-                      params->dwell[IEEE80211_BAND_2GHZ].active,
-                      params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_5GHZ].passive,
-                      params->dwell[IEEE80211_BAND_5GHZ].active,
-                      params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+       return IWL_SCAN_TYPE_WILD;
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -327,9 +290,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
        return buf;
 }
 
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +303,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
-
-       return 0;
 }
 
 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +326,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
        }
 }
 
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -392,9 +349,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
                WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+               IWL_DEBUG_SCAN(mvm,
+                              "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d\n",
                               aborted ? "aborted" : "completed",
-                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
        } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -406,9 +367,13 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
                WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
-               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+               IWL_DEBUG_SCAN(mvm,
+                              "Scheduled scan %s, EBS status %s, Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
                               aborted ? "aborted" : "completed",
-                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status),
+                              scan_notif->last_schedule_line,
+                              scan_notif->last_schedule_iteration,
+                              __le32_to_cpu(scan_notif->time_after_last_iter));
 
                mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
                ieee80211_sched_scan_stopped(mvm->hw);
@@ -426,8 +391,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        mvm->last_ebs_successful =
                        scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
                        scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
-       return 0;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,13 +714,11 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
@@ -794,7 +755,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
-       return params->schedule[0].iterations + params->schedule[1].iterations;
+       return params->iterations[0] + params->iterations[1];
 }
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -808,7 +769,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -861,11 +822,11 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        ssid_bitmap <<= 1;
 
        cmd->schedule[0].delay = cpu_to_le16(params->interval);
-       cmd->schedule[0].iterations = params->schedule[0].iterations;
-       cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+       cmd->schedule[0].iterations = params->iterations[0];
+       cmd->schedule[0].full_scan_mul = 1;
        cmd->schedule[1].delay = cpu_to_le16(params->interval);
-       cmd->schedule[1].iterations = params->schedule[1].iterations;
-       cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+       cmd->schedule[1].iterations = params->iterations[1];
+       cmd->schedule[1].full_scan_mul = 1;
 
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
                cmd->channel_opt[0].flags =
@@ -937,9 +898,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        int num_channels =
                mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
                mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int ret, i, j = 0, cmd_size, data_size;
+       int ret, i, j = 0, cmd_size;
        struct iwl_host_cmd cmd = {
-               .id = SCAN_CFG_CMD,
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
        };
 
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +912,6 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (!scan_config)
                return -ENOMEM;
 
-       data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-       scan_config->hdr.size = cpu_to_le16(data_size);
        scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,17 +972,15 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-       if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-       cmd->max_out_time = cpu_to_le32(params->max_out_time);
-       cmd->suspend_time = cpu_to_le32(params->suspend_time);
+       cmd->active_dwell = scan_timing[params->type].dwell_active;
+       cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
        cmd->scan_priority =
                iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-       if (iwl_mvm_scan_total_iterations(params) == 0)
+       if (iwl_mvm_scan_total_iterations(params) == 1)
                cmd->ooc_priority =
                        iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
        else
@@ -1059,7 +1016,7 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-       if (params->passive_fragmented)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1099,8 +1056,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return uid;
 
        memset(cmd, 0, ksize(cmd));
-       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
 
        iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
@@ -1109,6 +1064,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->uid = cpu_to_le32(uid);
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
+       if (type == IWL_MVM_SCAN_SCHED)
+               cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -1227,17 +1185,15 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        params.n_match_sets = 0;
        params.match_sets = NULL;
 
-       params.schedule[0].iterations = 1;
-       params.schedule[0].full_scan_mul = 0;
-       params.schedule[1].iterations = 0;
-       params.schedule[1].full_scan_mul = 0;
+       params.iterations[0] = 1;
+       params.iterations[1] = 0;
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params,
                                        IWL_MVM_SCAN_REGULAR);
        } else {
@@ -1310,10 +1266,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        params.n_match_sets = req->n_match_sets;
        params.match_sets = req->match_sets;
 
-       params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-       params.schedule[0].full_scan_mul = 1;
-       params.schedule[1].iterations = 0xff;
-       params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+       params.iterations[0] = 0;
+       params.iterations[1] = 0xff;
+
+       params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
        if (req->interval > U16_MAX) {
                IWL_DEBUG_SCAN(mvm,
@@ -1336,8 +1292,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                params.delay = req->delay;
        }
 
-       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
        ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
        if (ret)
                return ret;
@@ -1345,7 +1299,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1371,9 +1325,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1381,7 +1334,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
        if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-               return 0;
+               return;
 
        /* if the scan is already stopping, we don't need to notify mac80211 */
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1392,26 +1345,24 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        }
 
        mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
        IWL_DEBUG_SCAN(mvm,
-                      "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+                      "Scan completed, uid %u type %u, status %s, EBS status %s, Last line %d, Last iteration %d, Time from last iteration %d\n",
                       uid, mvm->scan_uid_status[uid],
                       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      iwl_mvm_ebs_status_str(notif->ebs_status));
+                      iwl_mvm_ebs_status_str(notif->ebs_status),
+                      notif->last_schedule, notif->last_iter,
+                      __le32_to_cpu(notif->time_from_last_iter));
 
        if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
            notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
                mvm->last_ebs_successful = false;
 
        mvm->scan_uid_status[uid] = 0;
-
-       return 0;
 }
 
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1423,15 +1374,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
-       struct iwl_umac_scan_abort cmd = {
-               .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-                                       sizeof(struct iwl_mvm_umac_cmd_hdr)),
-       };
+       struct iwl_umac_scan_abort cmd = {};
        int uid, ret;
 
        lockdep_assert_held(&mvm->mutex);
@@ -1448,7 +1395,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  iwl_cmd_id(SCAN_ABORT_UMAC,
+                                             IWL_ALWAYS_LONG_GROUP, 0),
+                                  0, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
@@ -1458,7 +1408,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+       static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
                                              SCAN_OFFLOAD_COMPLETE, };
        int ret;
 
index 3d2fbf1..df216cd 100644 (file)
@@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
-       int i;
+       int i, max = -1, max_offs = -1;
 
        lockdep_assert_held(&mvm->mutex);
 
-       i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+       /* Pick the unused key offset with the highest 'deleted'
+        * counter. Every time a key is deleted, all the counters
+        * are incremented and the one that was just deleted is
+        * reset to zero. Thus, the highest counter is the one
+        * that was deleted longest ago. Pick that one.
+        */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (test_bit(i, mvm->fw_key_table))
+                       continue;
+               if (mvm->fw_key_deleted[i] > max) {
+                       max = mvm->fw_key_deleted[i];
+                       max_offs = i;
+               }
+       }
 
-       if (i == STA_KEY_MAX_NUM)
+       if (max_offs < 0)
                return STA_KEY_IDX_INVALID;
 
-       __set_bit(i, mvm->fw_key_table);
+       __set_bit(max_offs, mvm->fw_key_table);
 
-       return i;
+       return max_offs;
 }
 
 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1399,6 +1412,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
        int ret;
+       static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1465,7 +1479,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta->addr, ret);
+                     sta ? sta->addr : zero_addr, ret);
        return ret;
 }
 
@@ -1476,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
-       int ret;
+       int ret, i;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1495,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                return -ENOENT;
        }
 
+       /* track which key was deleted last */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (mvm->fw_key_deleted[i] < U8_MAX)
+                       mvm->fw_key_deleted[i]++;
+       }
+       mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
        if (sta_id == IWL_MVM_STATION_COUNT) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
@@ -1658,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1668,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
        u32 sta_id = le32_to_cpu(notif->sta_id);
 
        if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-               return 0;
+               return;
 
        rcu_read_lock();
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
        if (!IS_ERR_OR_NULL(sta))
                ieee80211_sta_eosp(sta);
        rcu_read_unlock();
-
-       return 0;
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index 748f5dc..eedb215 100644 (file)
@@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta, u32 iv32,
                             u16 *phase1key);
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
index a87b506..fe2fa56 100644 (file)
@@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
 
-       if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
-               goto exit;
+       WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
 
        /* we don't really care about the response at this point */
 
-exit:
        iwl_free_resp(&cmd);
 }
 
@@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
 
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* can fail sometimes */
        if (!le32_to_cpu(notif->status)) {
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-               goto out;
+               return;
        }
 
        if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
-               goto out;
+               return;
 
        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));
        /* the station may not be here, but if it is, it must be a TDLS peer */
        if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
-               goto out;
+               return;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        vif = mvmsta->vif;
@@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                         msecs_to_jiffies(delay));
 
        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
-       return 0;
 }
 
 static int
@@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
        cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
 
        info = IEEE80211_SKB_CB(skb);
-       if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+       hdr = (void *)skb->data;
+       if (info->control.hw_key) {
+               if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+                       rcu_read_unlock();
+                       ret = -EINVAL;
+                       goto out;
+               }
+               iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+       }
 
        iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
                           mvmsta->sta_id);
 
-       hdr = (void *)skb->data;
        iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
                                hdr->frame_control);
        rcu_read_unlock();
index d24b6a8..dbd7d54 100644 (file)
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 {
        lockdep_assert_held(&mvm->time_event_lock);
 
-       if (te_data->id == TE_MAX)
+       if (!te_data->vif)
                return;
 
        list_del(&te_data->list);
@@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
 /*
  * The Rx handler for time event notifications
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
        }
 unlock:
        spin_unlock_bh(&mvm->time_event_lock);
-
-       return 0;
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
 {
-       static const u8 time_event_response[] = { TIME_EVENT_CMD };
+       static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;
 
@@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+       const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};
 
@@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 
-       time_cmd.apply_time =
-               cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+       time_cmd.apply_time = cpu_to_le32(0);
 
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
index de4fbc6..cbdf8e5 100644 (file)
@@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
 /*
  * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /**
  * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644 (file)
index 0000000..380972f
--- /dev/null
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+
+       tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (IWL_MVM_TOF_IS_RESPONDER) {
+               tof_data->responder_cfg.sub_grp_cmd_id =
+                       cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+               tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+       }
+#endif
+
+       tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+       tof_data->range_req.req_timeout = 1;
+       tof_data->range_req.initiator = 1;
+       tof_data->range_req.report_policy = 3;
+
+       tof_data->range_req_ext.sub_grp_cmd_id =
+               cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+                            struct ieee80211_vif *vif)
+{
+       bool *enabled = _data;
+
+       /* non bss vif exists */
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION)
+               *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+       struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+       bool enabled;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_tof_iterator, &enabled);
+       if (!enabled) {
+               IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+               return -EINVAL;
+       }
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+       struct iwl_tof_range_abort_cmd cmd = {
+               .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+               .request_id = id,
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+                       id, mvm->tof_data.active_range_request);
+               return -EINVAL;
+       }
+
+       /* after abort is sent there's no active request anymore */
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+               IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+               return -EIO;
+       }
+
+       cmd->sta_id = mvmvif->bcast_sta.sta_id;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .len = { sizeof(mvm->tof_data.range_req), },
+               /* no copy because of the command size */
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+               return -EIO;
+       }
+
+       /* nesting of range requests is not supported in FW */
+       if (mvm->tof_data.active_range_request !=
+               IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+               IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+                       mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+       cmd.data[0] = &mvm->tof_data.range_req;
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) !=  NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+               return -EIO;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(mvm->tof_data.range_req_ext),
+                                   &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+       if (resp->request_id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+                       resp->request_id, mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       memcpy(&mvm->tof_data.range_resp, resp,
+              sizeof(struct iwl_tof_range_rsp_ntfy));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+       IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+       return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_neighbor_report *report =
+               (struct iwl_tof_neighbor_report *)data;
+
+       IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+                      report->bssid, report->request_token, report->status);
+       return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+       case TOF_RANGE_RESPONSE_NOTIF:
+               iwl_mvm_tof_range_resp(mvm, resp->data);
+               break;
+       case TOF_MCSI_DEBUG_NOTIF:
+               iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+               break;
+       case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+               iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+               break;
+       default:
+               IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+                       le32_to_cpu(resp->sub_grp_cmd_id));
+               break;
+       }
+}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644 (file)
index 0000000..50ae8ad
--- /dev/null
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+       struct iwl_tof_config_cmd tof_cfg;
+       struct iwl_tof_range_req_cmd range_req;
+       struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+       struct iwl_tof_range_rsp_ntfy range_resp;
+       u8 last_abort_id;
+       u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
index 80d07db..fe7145c 100644 (file)
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        int temp;
 
        /* the notification is handled synchronously in ctkill, so skip here */
        if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
-               return 0;
+               return;
 
        temp = iwl_mvm_temp_notif_parse(mvm, pkt);
        if (temp < 0)
-               return 0;
+               return;
 
        iwl_mvm_tt_temp_changed(mvm, temp);
-
-       return 0;
 }
 
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_temp_notif;
-       static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+       static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
        int ret, temp;
 
        lockdep_assert_held(&mvm->mutex);
index 7ba7a11..6df5aad 100644 (file)
@@ -153,18 +153,20 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        if (ieee80211_is_mgmt(fc)) {
                if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+               else if (ieee80211_is_action(fc))
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
                else
-                       tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+                       tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
                /* The spec allows Action frames in A-MPDU, we don't support
                 * it
                 */
                WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
        } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-               tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
        } else {
-               tx_cmd->pm_frame_timeout = 0;
+               tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
        }
 
        if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -252,7 +254,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 
        if (info->band == IEEE80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-               rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
@@ -268,19 +270,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int hdrlen)
 {
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
+       u8 *crypto_hdr = skb_frag->data + hdrlen;
+       u64 pn;
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-               if (info->flags & IEEE80211_TX_CTL_AMPDU)
-                       tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+               pn = atomic64_inc_return(&keyconf->tx_pn);
+               crypto_hdr[0] = pn;
+               crypto_hdr[2] = 0;
+               crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+               crypto_hdr[1] = pn >> 8;
+               crypto_hdr[4] = pn >> 16;
+               crypto_hdr[5] = pn >> 24;
+               crypto_hdr[6] = pn >> 32;
+               crypto_hdr[7] = pn >> 40;
                break;
 
        case WLAN_CIPHER_SUITE_TKIP:
@@ -308,7 +320,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     struct ieee80211_sta *sta, u8 sta_id)
+                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +337,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 
        iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 
@@ -346,6 +358,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
+       int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
        if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
@@ -366,23 +379,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
        /*
-        * If the interface on which frame is sent is the P2P_DEVICE
+        * If the interface on which the frame is sent is the P2P_DEVICE
         * or an AP/GO interface use the broadcast station associated
-        * with it; otherwise use the AUX station.
+        * with it; otherwise if the interface is a managed interface
+        * use the AP station associated with it for multicast traffic
+        * (this is not possible for unicast packets as a TLDS discovery
+        * response are sent without a station entry); otherwise use the
+        * AUX station.
         */
-       if (info->control.vif &&
-           (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-            info->control.vif->type == NL80211_IFTYPE_AP)) {
+       sta_id = mvm->aux_sta.sta_id;
+       if (info->control.vif) {
                struct iwl_mvm_vif *mvmvif =
                        iwl_mvm_vif_from_mac80211(info->control.vif);
-               sta_id = mvmvif->bcast_sta.sta_id;
-       } else {
-               sta_id = mvm->aux_sta.sta_id;
+
+               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info->control.vif->type == NL80211_IFTYPE_AP)
+                       sta_id = mvmvif->bcast_sta.sta_id;
+               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+                        is_multicast_ether_addr(hdr->addr1)) {
+                       u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+                       if (ap_sta_id != IWL_MVM_STATION_COUNT)
+                               sta_id = ap_sta_id;
+               }
        }
 
        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
 
@@ -390,7 +414,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +440,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
        bool is_data_qos = false, is_ampdu = false;
+       int hdrlen;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        fc = hdr->frame_control;
+       hdrlen = ieee80211_hdrlen(fc);
 
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
@@ -426,7 +452,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
 
@@ -458,7 +484,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        }
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
@@ -911,8 +937,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        rcu_read_unlock();
 }
 
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +946,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                iwl_mvm_rx_tx_cmd_single(mvm, pkt);
        else
                iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
-       return 0;
 }
 
 static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +965,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
                (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +987,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
                      tid >= IWL_MAX_TID_COUNT,
                      "sta_id %d tid %d", sta_id, tid))
-               return 0;
+               return;
 
        rcu_read_lock();
 
@@ -974,7 +996,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* Reclaiming frames for a station that has been deleted ? */
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1007,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        "invalid BA notification: Q %d, tid %d, flow %d\n",
                        tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1094,6 @@ out:
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(mvm->hw, skb);
        }
-
-       return 0;
 }
 
 /*
index 03f8e06..a7d4342 100644 (file)
@@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
        return ret;
 }
 
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                         u32 flags, u16 len, const void *data)
 {
        struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                goto out_free_resp;
        }
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               ret = -EIO;
-               goto out_free_resp;
-       }
-
        resp_len = iwl_rx_packet_payload_len(pkt);
        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                ret = -EIO;
@@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 /*
  * We assume that the caller set the status to the success value
  */
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
                                const void *data, u32 *status)
 {
        struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
        return fw_rate_idx_to_plcp[rate_idx];
 }
 
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                le32_to_cpu(err_resp->error_service));
        IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
                le64_to_cpu(err_resp->timestamp));
-       return 0;
 }
 
 /*
index 2ed1e4d..b0825c4 100644 (file)
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
@@ -613,6 +614,7 @@ static int iwl_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_trans *trans = pci_get_drvdata(pdev);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
 
        /* Before you put code here, think about WoWLAN. You cannot check here
@@ -630,20 +632,16 @@ static int iwl_pci_resume(struct device *device)
                return 0;
 
        /*
-        * On suspend, ict is disabled, and the interrupt mask
-        * gets cleared. Reconfigure them both in case of d0i3
-        * image. Otherwise, only enable rfkill interrupt (in
-        * order to keep track of the rfkill status)
+        * Enable rfkill interrupt (in order to keep track of
+        * the rfkill status)
         */
-       if (trans->wowlan_d0i3) {
-               iwl_pcie_reset_ict(trans);
-               iwl_enable_interrupts(trans);
-       } else {
-               iwl_enable_rfkill_int(trans);
-       }
+       iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+
+       mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       mutex_unlock(&trans_pcie->mutex);
 
        return 0;
 }
index 31f72a6..feb2f7e 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
 /*
  * RX related structures and functions
  */
@@ -299,8 +305,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
  * @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -320,9 +328,11 @@ struct iwl_trans_pcie {
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
+       bool is_down;
        struct isr_statistics isr_stats;
 
        spinlock_t irq_lock;
+       struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
@@ -349,6 +359,7 @@ struct iwl_trans_pcie {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        u32 rx_page_order;
 
        const char *const *command_names;
@@ -420,7 +431,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status);
+                           struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
index a3fbaa0..5643ace 100644 (file)
@@ -281,12 +281,13 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
  * iwl_pcie_rx_alloc_page - allocates and returns a page.
  *
  */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+                                          gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct page *page;
-       gfp_t gfp_mask = GFP_KERNEL;
+       gfp_t gfp_mask = priority;
 
        if (rxq->free_count > RX_LOW_WATERMARK)
                gfp_mask |= __GFP_NOWARN;
@@ -324,7 +325,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -340,7 +341,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
                spin_unlock(&rxq->lock);
 
                /* Alloc a new receive buffer */
-               page = iwl_pcie_rx_alloc_page(trans);
+               page = iwl_pcie_rx_alloc_page(trans, priority);
                if (!page)
                        return;
 
@@ -414,7 +415,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  */
 static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-       iwl_pcie_rxq_alloc_rbs(trans);
+       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
        iwl_pcie_rxq_restock(trans);
 }
@@ -429,17 +430,22 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       struct list_head local_empty;
+       int pending = atomic_xchg(&rba->req_pending, 0);
 
-       while (atomic_read(&rba->req_pending)) {
+       IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+       /* If we were scheduled - there is at least one request */
+       spin_lock(&rba->lock);
+       /* swap out the rba->rbd_empty to a local list */
+       list_replace_init(&rba->rbd_empty, &local_empty);
+       spin_unlock(&rba->lock);
+
+       while (pending) {
                int i;
-               struct list_head local_empty;
                struct list_head local_allocated;
 
                INIT_LIST_HEAD(&local_allocated);
-               spin_lock(&rba->lock);
-               /* swap out the entire rba->rbd_empty to a local list */
-               list_replace_init(&rba->rbd_empty, &local_empty);
-               spin_unlock(&rba->lock);
 
                for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
                        struct iwl_rx_mem_buffer *rxb;
@@ -457,7 +463,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                        BUG_ON(rxb->page);
 
                        /* Alloc a new receive buffer */
-                       page = iwl_pcie_rx_alloc_page(trans);
+                       page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
                        if (!page)
                                continue;
                        rxb->page = page;
@@ -481,16 +487,28 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                        i++;
                }
 
+               pending--;
+               if (!pending) {
+                       pending = atomic_xchg(&rba->req_pending, 0);
+                       IWL_DEBUG_RX(trans,
+                                    "Pending allocation requests = %d\n",
+                                    pending);
+               }
+
                spin_lock(&rba->lock);
                /* add the allocated rbds to the allocator allocated list */
                list_splice_tail(&local_allocated, &rba->rbd_allocated);
-               /* add the unused rbds back to the allocator empty list */
-               list_splice_tail(&local_empty, &rba->rbd_empty);
+               /* get more empty RBDs for current pending requests */
+               list_splice_tail_init(&rba->rbd_empty, &local_empty);
                spin_unlock(&rba->lock);
 
-               atomic_dec(&rba->req_pending);
                atomic_inc(&rba->req_ready);
        }
+
+       spin_lock(&rba->lock);
+       /* return unused rbds to the allocator empty list */
+       list_splice_tail(&local_empty, &rba->rbd_empty);
+       spin_unlock(&rba->lock);
 }
 
 /*
@@ -507,13 +525,16 @@ static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i;
 
-       if (atomic_dec_return(&rba->req_ready) < 0) {
-               atomic_inc(&rba->req_ready);
-               IWL_DEBUG_RX(trans,
-                            "Allocation request not ready, pending requests = %d\n",
-                            atomic_read(&rba->req_pending));
+       /*
+        * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+        * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+        * function will return -ENOMEM, as there are no ready requests.
+        * atomic_dec_if_positive will perform the *actual* decrement only if
+        * req_ready > 0, i.e. - there are ready requests and the function
+        * hands one request to the caller.
+        */
+       if (atomic_dec_if_positive(&rba->req_ready) < 0)
                return -ENOMEM;
-       }
 
        spin_lock(&rba->lock);
        for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
@@ -777,18 +798,21 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
  */
 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb,
-                                 struct iwl_rxq *rxq)
+                                 struct iwl_rxq *rxq, bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
-       /* Count the used RBDs */
-       rxq->used_count++;
-
        /* Move the RBD to the used list, will be moved to allocator in batches
         * before claiming or posting a request*/
        list_add_tail(&rxb->list, &rxq->rx_used);
 
+       if (unlikely(emergency))
+               return;
+
+       /* Count the allocator owned RBDs */
+       rxq->used_count++;
+
        /* If we have RX_POST_REQ_ALLOC new released rx buffers -
         * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
         * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
@@ -807,7 +831,8 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 }
 
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-                               struct iwl_rx_mem_buffer *rxb)
+                               struct iwl_rx_mem_buffer *rxb,
+                               bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -823,10 +848,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
-               struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
-               int index, cmd_index, err, len;
+               int index, cmd_index, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
@@ -874,12 +898,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
-                       cmd = NULL;
-
-               err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+               iwl_op_mode_rx(trans->op_mode, &rxcb);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -897,7 +916,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
-                               iwl_pcie_hcmd_complete(trans, &rxcb, err);
+                               iwl_pcie_hcmd_complete(trans, &rxcb);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }
@@ -928,13 +947,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
 /*
@@ -944,7 +963,8 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i, j;
+       u32 r, i, j, count = 0;
+       bool emergency = false;
 
 restart:
        spin_lock(&rxq->lock);
@@ -960,12 +980,15 @@ restart:
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
+               if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+                       emergency = true;
+
                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;
 
                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
-               iwl_pcie_rx_handle_rb(trans, rxb);
+               iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
                i = (i + 1) & RX_QUEUE_MASK;
 
@@ -975,10 +998,16 @@ restart:
                        struct iwl_rb_allocator *rba = &trans_pcie->rba;
                        struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
 
-                       /* Add the remaining 6 empty RBDs for allocator use */
-                       spin_lock(&rba->lock);
-                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-                       spin_unlock(&rba->lock);
+                       if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+                           !emergency) {
+                               /* Add the remaining 6 empty RBDs
+                               * for allocator use
+                                */
+                               spin_lock(&rba->lock);
+                               list_splice_tail_init(&rxq->rx_used,
+                                                     &rba->rbd_empty);
+                               spin_unlock(&rba->lock);
+                       }
 
                        /* If not ready - continue, will try to reclaim later.
                        * No need to reschedule work - allocator exits only on
@@ -995,9 +1024,22 @@ restart:
                                }
                        }
                }
-               /* handle restock for two cases:
+               if (emergency) {
+                       count++;
+                       if (count == 8) {
+                               count = 0;
+                               if (rxq->used_count < RX_QUEUE_SIZE / 3)
+                                       emergency = false;
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+                               spin_lock(&rxq->lock);
+                       }
+               }
+               /* handle restock for three cases, can be all of them at once:
                * - we just pulled buffers from the allocator
-               * - we have 8+ unstolen pages accumulated */
+               * - we have 8+ unstolen pages accumulated
+               * - we are in emergency and allocated buffers
+                */
                if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
                        rxq->read = i;
                        spin_unlock(&rxq->lock);
@@ -1010,6 +1052,21 @@ restart:
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
+       /*
+        * handle a case where in emergency there are some unallocated RBDs.
+        * those RBDs are in the used list, but are not tracked by the queue's
+        * used_count which counts allocator owned RBDs.
+        * unallocated emergency RBDs must be allocated on exit, otherwise
+        * when called again the function may not be in emergency mode and
+        * they will be handed to the allocator with no tracking in the RBD
+        * allocator counters, which will lead to them never being claimed back
+        * by the queue.
+        * by allocating them here, they are now in the queue free list, and
+        * will be restocked by the next call of iwl_pcie_rxq_restock.
+        */
+       if (unlikely(emergency && count))
+               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
 }
@@ -1020,6 +1077,7 @@ restart:
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
@@ -1043,6 +1101,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        iwl_trans_fw_error(trans);
        local_bh_enable();
 
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+               del_timer(&trans_pcie->txq[i].stuck_timer);
+
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1251,7 +1312,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                isr_stats->rfkill++;
 
+               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+               mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1443,8 +1506,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 
        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-       val |= CSR_DRAM_INT_TBL_ENABLE;
-       val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+       val |= CSR_DRAM_INT_TBL_ENABLE |
+              CSR_DRAM_INIT_TBL_WRAP_CHECK |
+              CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
index 43ae658..ceea2d5 100644 (file)
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-       if (!trans->cfg->apmg_not_supported)
+       if (trans->cfg->apmg_not_supported)
                return;
 
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
-               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+                       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+                       mdelay(1);
+                       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               }
                mdelay(5);
        }
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
+       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
+       msleep(1);
+
        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
                do {
                        ret = iwl_pcie_set_hw_ready(trans);
-                       if (ret >= 0)
-                               return 0;
+                       if (ret >= 0) {
+                               ret = 0;
+                               goto out;
+                       }
 
                        usleep_range(200, 1000);
                        t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
        IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
        return ret;
 }
 
@@ -764,8 +780,15 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+                * CPU1 to CPU2.
+                * PAGING_SEPARATOR_SECTION delimiter - separate between
+                * CPU2 non paged to CPU2 paging sec.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -813,8 +836,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
        for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
                last_read_idx = i;
 
+               /*
+                * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+                * CPU1 to CPU2.
+                * PAGING_SEPARATOR_SECTION delimiter - separate between
+                * CPU2 non paged to CPU2 paging sec.
+                */
                if (!image->sec[i].data ||
-                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+                   image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+                   image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
@@ -881,6 +911,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
+               case PRPH_BLOCKBIT:
+                       if (iwl_read_prph(trans, addr) & BIT(val)) {
+                               IWL_ERR(trans,
+                                       "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+                                       val, addr);
+                               goto monitor;
+                       }
+                       break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
@@ -888,6 +926,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                }
        }
 
+monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans_pcie->fw_mon_phys >> dest->base_shift);
@@ -982,13 +1021,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
 {
-       int ret;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+
+       /* Someone called stop_device, don't try to start_fw */
+       if (trans_pcie->is_down) {
+               IWL_WARN(trans,
+                        "Can't start_fw since the HW hasn't been started\n");
+               ret = -EIO;
+               goto out;
+       }
 
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        iwl_enable_rfkill_int(trans);
@@ -1000,15 +1051,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-       if (hw_rfkill && !run_in_rfkill)
-               return -ERFKILL;
+       if (hw_rfkill && !run_in_rfkill) {
+               ret = -ERFKILL;
+               goto out;
+       }
 
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
-               return ret;
+               goto out;
        }
 
        /* make sure rfkill handshake bits are cleared */
@@ -1026,9 +1079,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* Load the given image to the HW */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               return iwl_pcie_load_given_ucode_8000(trans, fw);
+               ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
-               return iwl_pcie_load_given_ucode(trans, fw);
+               ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1037,11 +1094,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
+       if (trans_pcie->is_down)
+               return;
+
+       trans_pcie->is_down = true;
+
        was_hw_rfkill = iwl_is_rfkill_set(trans);
 
        /* tell the device to stop sending interrupts */
@@ -1131,14 +1195,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        iwl_pcie_prepare_card_hw(trans);
 }
 
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       mutex_lock(&trans_pcie->mutex);
+       _iwl_trans_pcie_stop_device(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
+       struct iwl_trans_pcie __maybe_unused *trans_pcie =
+               IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->mutex);
+
        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               iwl_trans_pcie_stop_device(trans, true);
+               _iwl_trans_pcie_stop_device(trans, true);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans->wowlan_d0i3) {
+               /* Enable persistence mode to avoid reset */
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+       }
+
        iwl_disable_interrupts(trans);
 
        /*
@@ -1150,17 +1236,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 
        iwl_pcie_disable_ict(trans);
 
+       synchronize_irq(trans_pcie->pci_dev->irq);
+
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-       /*
-        * reset TX queues -- some of their registers reset during S3
-        * so if we don't reset everything here the D3 image would try
-        * to execute some invalid memory upon resume
-        */
-       iwl_trans_pcie_tx_reset(trans);
+       if (!trans->wowlan_d0i3) {
+               /*
+                * reset TX queues -- some of their registers reset during S3
+                * so if we don't reset everything here the D3 image would try
+                * to execute some invalid memory upon resume
+                */
+               iwl_trans_pcie_tx_reset(trans);
+       }
 
        iwl_pcie_set_pwr(trans, true);
 }
@@ -1202,12 +1292,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
        iwl_pcie_set_pwr(trans, false);
 
-       iwl_trans_pcie_tx_reset(trans);
+       if (trans->wowlan_d0i3) {
+               iwl_clear_bit(trans, CSR_GP_CNTRL,
+                             CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       } else {
+               iwl_trans_pcie_tx_reset(trans);
 
-       ret = iwl_pcie_rx_init(trans);
-       if (ret) {
-               IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-               return ret;
+               ret = iwl_pcie_rx_init(trans);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Failed to resume the device (RX reset)\n");
+                       return ret;
+               }
        }
 
        val = iwl_read32(trans, CSR_RESET);
@@ -1219,11 +1315,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1240,20 +1339,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);
 
+       /* Set is_down to false here so that...*/
+       trans_pcie->is_down = false;
+
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
+       /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
        return 0;
 }
 
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+       ret = _iwl_trans_pcie_start_hw(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+
+       return ret;
+}
+
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       mutex_lock(&trans_pcie->mutex);
+
        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
@@ -1266,6 +1383,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
        spin_unlock(&trans_pcie->irq_lock);
 
        iwl_pcie_disable_ict(trans);
+
+       mutex_unlock(&trans_pcie->mutex);
+
+       synchronize_irq(trans_pcie->pci_dev->irq);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1326,6 +1447,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);
 
+       trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2169,6 +2291,47 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
        return prph_len;
 }
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+                                  struct iwl_fw_error_dump_data **data,
+                                  int allocated_rb_nums)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       u32 i, r, j, rb_len = 0;
+
+       spin_lock(&rxq->lock);
+
+       r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+       for (i = rxq->read, j = 0;
+            i != r && j < allocated_rb_nums;
+            i = (i + 1) & RX_QUEUE_MASK, j++) {
+               struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+               struct iwl_fw_error_dump_rb *rb;
+
+               dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+                              DMA_FROM_DEVICE);
+
+               rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+               (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+               rb = (void *)(*data)->data;
+               rb->index = cpu_to_le32(i);
+               memcpy(rb->data, page_address(rxb->page), max_len);
+               /* remap the page for the free benefit */
+               rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+                                                    max_len,
+                                                    DMA_FROM_DEVICE);
+
+               *data = iwl_fw_error_next_data(*data);
+       }
+
+       spin_unlock(&rxq->lock);
+
+       return rb_len;
+}
 #define IWL_CSR_TO_DUMP (0x250)
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2238,17 +2401,97 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
        return monitor_len;
 }
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+                           struct iwl_fw_error_dump_data **data,
+                           u32 monitor_len)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 len = 0;
+
+       if ((trans_pcie->fw_mon_page &&
+            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+           trans->dbg_dest_tlv) {
+               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+               u32 base, write_ptr, wrap_cnt;
+
+               /* If there was a dest TLV - use the values from there */
+               if (trans->dbg_dest_tlv) {
+                       write_ptr =
+                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+               } else {
+                       base = MON_BUFF_BASE_ADDR;
+                       write_ptr = MON_BUFF_WRPTR;
+                       wrap_cnt = MON_BUFF_CYCLE_CNT;
+               }
+
+               (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+               fw_mon_data = (void *)(*data)->data;
+               fw_mon_data->fw_mon_wr_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
+               fw_mon_data->fw_mon_cycle_cnt =
+                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+               fw_mon_data->fw_mon_base_ptr =
+                       cpu_to_le32(iwl_read_prph(trans, base));
+
+               len += sizeof(**data) + sizeof(*fw_mon_data);
+               if (trans_pcie->fw_mon_page) {
+                       /*
+                        * The firmware is now asserted, it won't write anything
+                        * to the buffer. CPU can take ownership to fetch the
+                        * data. The buffer will be handed back to the device
+                        * before the firmware will be restarted.
+                        */
+                       dma_sync_single_for_cpu(trans->dev,
+                                               trans_pcie->fw_mon_phys,
+                                               trans_pcie->fw_mon_size,
+                                               DMA_FROM_DEVICE);
+                       memcpy(fw_mon_data->data,
+                              page_address(trans_pcie->fw_mon_page),
+                              trans_pcie->fw_mon_size);
+
+                       monitor_len = trans_pcie->fw_mon_size;
+               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+                       /*
+                        * Update pointers to reflect actual values after
+                        * shifting
+                        */
+                       base = iwl_read_prph(trans, base) <<
+                              trans->dbg_dest_tlv->base_shift;
+                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
+                                          monitor_len / sizeof(u32));
+               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+                       monitor_len =
+                               iwl_trans_pci_dump_marbh_monitor(trans,
+                                                                fw_mon_data,
+                                                                monitor_len);
+               } else {
+                       /* Didn't match anything - output no monitor data */
+                       monitor_len = 0;
+               }
+
+               len += monitor_len;
+               (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+       }
+
+       return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+                         struct iwl_fw_dbg_trigger_tlv *trigger)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_fw_error_dump_data *data;
        struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
        struct iwl_fw_error_dump_txcmd *txcmd;
        struct iwl_trans_dump_data *dump_data;
-       u32 len;
+       u32 len, num_rbs;
        u32 monitor_len;
        int i, ptr;
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
        /* transport dump header */
        len = sizeof(*dump_data);
@@ -2257,22 +2500,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += sizeof(*data) +
                cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
-       /* CSR registers */
-       len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-       /* PRPH registers */
-       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-               /* The range includes both boundaries */
-               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-                       iwl_prph_dump_addr[i].start + 4;
-
-               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-                       num_bytes_in_chunk;
-       }
-
-       /* FH registers */
-       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
        /* FW monitor */
        if (trans_pcie->fw_mon_page) {
                len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2300,6 +2527,45 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                monitor_len = 0;
        }
 
+       if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+               dump_data = vzalloc(len);
+               if (!dump_data)
+                       return NULL;
+
+               data = (void *)dump_data->data;
+               len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+               dump_data->len = len;
+
+               return dump_data;
+       }
+
+       /* CSR registers */
+       len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+       /* PRPH registers */
+       for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+               /* The range includes both boundaries */
+               int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+                       iwl_prph_dump_addr[i].start + 4;
+
+               len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+                      num_bytes_in_chunk;
+       }
+
+       /* FH registers */
+       len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+       if (dump_rbs) {
+               /* RBs */
+               num_rbs = le16_to_cpu(ACCESS_ONCE(
+                                     trans_pcie->rxq.rb_stts->closed_rb_num))
+                                     & 0x0FFF;
+               num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+               len += num_rbs * (sizeof(*data) +
+                                 sizeof(struct iwl_fw_error_dump_rb) +
+                                 (PAGE_SIZE << trans_pcie->rx_page_order));
+       }
+
        dump_data = vzalloc(len);
        if (!dump_data)
                return NULL;
@@ -2336,74 +2602,10 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
        len += iwl_trans_pcie_dump_prph(trans, &data);
        len += iwl_trans_pcie_dump_csr(trans, &data);
        len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-       /* data is already pointing to the next section */
-
-       if ((trans_pcie->fw_mon_page &&
-            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-           trans->dbg_dest_tlv) {
-               struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-               u32 base, write_ptr, wrap_cnt;
-
-               /* If there was a dest TLV - use the values from there */
-               if (trans->dbg_dest_tlv) {
-                       write_ptr =
-                               le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-                       wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-                       base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-               } else {
-                       base = MON_BUFF_BASE_ADDR;
-                       write_ptr = MON_BUFF_WRPTR;
-                       wrap_cnt = MON_BUFF_CYCLE_CNT;
-               }
+       if (dump_rbs)
+               len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
-               data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-               fw_mon_data = (void *)data->data;
-               fw_mon_data->fw_mon_wr_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, write_ptr));
-               fw_mon_data->fw_mon_cycle_cnt =
-                       cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-               fw_mon_data->fw_mon_base_ptr =
-                       cpu_to_le32(iwl_read_prph(trans, base));
-
-               len += sizeof(*data) + sizeof(*fw_mon_data);
-               if (trans_pcie->fw_mon_page) {
-                       /*
-                        * The firmware is now asserted, it won't write anything
-                        * to the buffer. CPU can take ownership to fetch the
-                        * data. The buffer will be handed back to the device
-                        * before the firmware will be restarted.
-                        */
-                       dma_sync_single_for_cpu(trans->dev,
-                                               trans_pcie->fw_mon_phys,
-                                               trans_pcie->fw_mon_size,
-                                               DMA_FROM_DEVICE);
-                       memcpy(fw_mon_data->data,
-                              page_address(trans_pcie->fw_mon_page),
-                              trans_pcie->fw_mon_size);
-
-                       monitor_len = trans_pcie->fw_mon_size;
-               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-                       /*
-                        * Update pointers to reflect actual values after
-                        * shifting
-                        */
-                       base = iwl_read_prph(trans, base) <<
-                              trans->dbg_dest_tlv->base_shift;
-                       iwl_trans_read_mem(trans, base, fw_mon_data->data,
-                                          monitor_len / sizeof(u32));
-               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-                       monitor_len =
-                               iwl_trans_pci_dump_marbh_monitor(trans,
-                                                                fw_mon_data,
-                                                                monitor_len);
-               } else {
-                       /* Didn't match anything - output no monitor data */
-                       monitor_len = 0;
-               }
-
-               len += monitor_len;
-               data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-       }
+       len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
        dump_data->len = len;
 
@@ -2459,23 +2661,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
-       int err;
+       int ret;
 
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
        if (!trans)
                return ERR_PTR(-ENOMEM);
 
+       trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        spin_lock_init(&trans_pcie->ref_lock);
+       mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-       err = pci_enable_device(pdev);
-       if (err)
+       ret = pci_enable_device(pdev);
+       if (ret)
                goto out_no_pci;
 
        if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2696,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (err) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (!err)
-                       err = pci_set_consistent_dma_mask(pdev,
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!ret)
+               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (ret) {
+               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!ret)
+                       ret = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
-               if (err) {
+               if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }
 
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err) {
+       ret = pci_request_regions(pdev, DRV_NAME);
+       if (ret) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }
@@ -2515,7 +2720,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-               err = -ENODEV;
+               ret = -ENODEV;
                goto out_pci_release_regions;
        }
 
@@ -2527,9 +2732,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->pci_dev = pdev;
        iwl_disable_interrupts(trans);
 
-       err = pci_enable_msi(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+       ret = pci_enable_msi(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2752,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
                unsigned long flags;
-               int ret;
 
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               ret = iwl_pcie_prepare_card_hw(trans);
+               if (ret) {
+                       IWL_WARN(trans, "Exit HW not ready\n");
+                       goto out_pci_disable_msi;
+               }
+
                /*
                 * in-order to recognize C step driver should read chip version
                 * id located at the AUX bus MISC address space.
@@ -2591,13 +2801,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-       if (iwl_pcie_alloc_ict(trans))
+       ret = iwl_pcie_alloc_ict(trans);
+       if (ret)
                goto out_pci_disable_msi;
 
-       err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+       ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
-       if (err) {
+       if (ret) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
@@ -2617,5 +2828,5 @@ out_pci_disable_device:
        pci_disable_device(pdev);
 out_no_pci:
        iwl_trans_free(trans);
-       return ERR_PTR(err);
+       return ERR_PTR(ret);
 }
index 2b86c21..a8c8a4a 100644 (file)
@@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
        sta_id = tx_cmd->sta_id;
        sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        if (trans_pcie->bc_table_dword)
                len = DIV_ROUND_UP(len, 4);
 
+       if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+               return;
+
        bc_ent = cpu_to_le16(len | (sta_id << 12));
 
        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
        /* first TB is never freed - it's the scratchbuf data */
 
-       for (i = 1; i < num_tbs; i++)
-               dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-                                iwl_pcie_tfd_tb_get_len(tfd, i),
-                                DMA_TO_DEVICE);
-
+       for (i = 1; i < num_tbs; i++) {
+               if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+                       dma_unmap_page(trans->dev,
+                                      iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                      iwl_pcie_tfd_tb_get_len(tfd, i),
+                                      DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(trans->dev,
+                                        iwl_pcie_tfd_tb_get_addr(tfd, i),
+                                        iwl_pcie_tfd_tb_get_len(tfd, i),
+                                        DMA_TO_DEVICE);
+       }
        tfd->num_tbs = 0;
 }
 
@@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 
        iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-       return 0;
+       return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                }
        }
 
+       iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
        if (trans->cfg->base_params->num_of_queues > 20)
                iwl_set_bits_prph(trans, SCD_GP_CTRL,
                                  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        int idx;
        u16 copy_size, cmd_size, scratch_size;
        bool had_nocopy = false;
+       u8 group_id = iwl_cmd_groupid(cmd->id);
        int i, ret;
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-       copy_size = sizeof(out_cmd->hdr);
-       cmd_size = sizeof(out_cmd->hdr);
+       if (WARN(!trans_pcie->wide_cmd_header &&
+                group_id > IWL_ALWAYS_LONG_GROUP,
+                "unsupported wide command %#x\n", cmd->id))
+               return -EINVAL;
+
+       if (group_id != 0) {
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+               cmd_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               copy_size = sizeof(struct iwl_cmd_header);
+               cmd_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                out_meta->source = cmd;
 
        /* set up the header */
-
-       out_cmd->hdr.cmd = cmd->id;
-       out_cmd->hdr.flags = 0;
-       out_cmd->hdr.sequence =
-               cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-                                        INDEX_TO_SEQ(q->write_ptr));
+       if (group_id != 0) {
+               out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr_wide.group_id = group_id;
+               out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+               out_cmd->hdr_wide.length =
+                       cpu_to_le16(cmd_size -
+                                   sizeof(struct iwl_cmd_header_wide));
+               out_cmd->hdr_wide.reserved = 0;
+               out_cmd->hdr_wide.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+
+               cmd_pos = sizeof(struct iwl_cmd_header_wide);
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+               out_cmd->hdr.group_id = 0;
+
+               cmd_pos = sizeof(struct iwl_cmd_header);
+               copy_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* and copy the data that needs to be copied */
-       cmd_pos = offsetof(struct iwl_device_cmd, payload);
-       copy_size = sizeof(out_cmd->hdr);
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;
 
@@ -1464,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        }
 
        IWL_DEBUG_HC(trans,
-                    "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+                    "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-                    out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+                    group_id, out_cmd->hdr.cmd,
+                    le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
        /* start the TFD with the scratchbuf */
@@ -1516,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
        }
 
+       BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+                    sizeof(out_meta->flags) * BITS_PER_BYTE);
        out_meta->flags = cmd->flags;
        if (WARN_ON_ONCE(txq->entries[idx].free_buf))
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;
 
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status)
+                           struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                meta->source->resp_pkt = pkt;
                meta->source->_rx_page_addr = (unsigned long)page_address(p);
                meta->source->_rx_page_order = trans_pcie->rx_page_order;
-               meta->source->handler_status = handler_status;
        }
 
        iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1762,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ieee80211_hdr *hdr;
        struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
        struct iwl_cmd_meta *out_meta;
        struct iwl_txq *txq;
@@ -1771,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        void *tb1_addr;
        u16 len, tb1_len, tb2_len;
        bool wait_write_ptr;
-       __le16 fc = hdr->frame_control;
-       u8 hdr_len = ieee80211_hdrlen(fc);
+       __le16 fc;
+       u8 hdr_len;
        u16 wifi_seq;
+       int i;
 
        txq = &trans_pcie->txq[txq_id];
        q = &txq->q;
@@ -1782,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                      "TX on unused queue %d\n", txq_id))
                return -EINVAL;
 
+       if (skb_is_nonlinear(skb) &&
+           skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+           __skb_linearize(skb))
+               return -ENOMEM;
+
+       /* mac80211 always puts the full header into the SKB's head,
+        * so there's no need to check if it's readable there
+        */
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = hdr->frame_control;
+       hdr_len = ieee80211_hdrlen(fc);
+
        spin_lock(&txq->lock);
 
        /* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1812,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* Set up first empty entry in queue's array of Tx/cmd buffers */
        out_meta = &txq->entries[q->write_ptr].meta;
+       out_meta->flags = 0;
 
        /*
         * The second TB (tb1) points to the remainder of the TX command
@@ -1845,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /*
         * Set up TFD's third entry to point directly to remainder
-        * of skb, if any (802.11 null frames have no payload).
+        * of skb's head, if any
         */
-       tb2_len = skb->len - hdr_len;
+       tb2_len = skb_headlen(skb) - hdr_len;
        if (tb2_len > 0) {
                dma_addr_t tb2_phys = dma_map_single(trans->dev,
                                                     skb->data + hdr_len,
@@ -1860,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
        }
 
+       /* set up the remaining entries to point to the data */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               dma_addr_t tb_phys;
+               int tb_idx;
+
+               if (!skb_frag_size(frag))
+                       continue;
+
+               tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+                                          skb_frag_size(frag), DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                       iwl_pcie_tfd_unmap(trans, out_meta,
+                                          &txq->tfds[q->write_ptr]);
+                       goto out_err;
+               }
+               tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+                                               skb_frag_size(frag), false);
+
+               out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+       }
+
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1869,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                             &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
                             skb->data + hdr_len, tb2_len);
        trace_iwlwifi_dev_tx_data(trans->dev, skb,
-                                 skb->data + hdr_len, tb2_len);
+                                 hdr_len, skb->len - hdr_len);
 
        wait_write_ptr = ieee80211_has_morefrags(fc);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
-               if (txq->wd_timeout)
-                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+               if (txq->wd_timeout) {
+                       /*
+                        * If the TXQ is active, then set the timer, if not,
+                        * set the timer in remainder so that the timer will
+                        * be armed with the right value when the station will
+                        * wake up.
+                        */
+                       if (!txq->frozen)
+                               mod_timer(&txq->stuck_timer,
+                                         jiffies + txq->wd_timeout);
+                       else
+                               txq->frozen_expiry_remainder = txq->wd_timeout;
+               }
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }