/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2 * HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

/* set device type and latency */
static int iwl_set_soc_latency(struct iwl_mvm *mvm)
{
	struct iwl_soc_configuration_cmd cmd;
	int ret;

	cmd.device_type = (mvm->trans->trans_cfg->integrated) ?
		cpu_to_le32(SOC_CONFIG_CMD_INTEGRATED) :
		cpu_to_le32(SOC_CONFIG_CMD_DISCRETE);
	cmd.soc_latency = cpu_to_le32(mvm->trans->trans_cfg->xtal_latency);

	ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SOC_CONFIGURATION_CMD,
						   SYSTEM_GROUP, 0),
				   0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to set soc latency: %d\n", ret);
	return ret;
}
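
/* Tell the firmware which TX antennas (chains) are valid to use */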
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
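
/*
 * Configure RSS: enable hashing of TCP/UDP/payload over IPv4 and IPv6 and
 * spread the resulting traffic over all RX queues except queue 0.
 */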
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
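
/*
 * Pass the DMA ring addresses of every RX queue (except the default queue,
 * which is configured through context info) to the firmware.
 */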
static int iwl_configure_rxq(struct iwl_mvm *mvm)
{
	int i, num_queues, size, ret;
	struct iwl_rfh_queue_config *cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	/* Do not configure default queue, it is configured via context info */
	num_queues = mvm->trans->num_rx_queues - 1;

	size = struct_size(cmd, data, num_queues);

	cmd = kzalloc(size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->num_queues = num_queues;

	for (i = 0; i < num_queues; i++) {
		struct iwl_trans_rxq_dma_data data;

		cmd->data[i].q_num = i + 1;
		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);

		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
		cmd->data[i].urbd_stts_wrptr =
			cpu_to_le64(data.urbd_stts_wrptr);
		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
	}

	hcmd.data[0] = cmd;
	hcmd.len[0] = size;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);

	kfree(cmd);

	return ret;
}
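
/*
 * Enable dynamic queue allocation (DQA) mode and tell the firmware which
 * queue is used as the command queue.
 */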
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}
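
/*
 * Handle an MFUART assert dump notification: log the assert id for the
 * first chunk and dump the payload words to the debug log.
 */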
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}
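
/*
 * Notification-wait callback for the ALIVE response: handles both the v3
 * and the current response layout, records the LMAC/UMAC error table
 * pointers and the SCD base address, and reports whether the firmware
 * came up successfully.
 */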
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_v3 *palive3;
	struct mvm_alive_resp *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_event_table;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);

	if (!umac_error_event_table) {
		mvm->support_umac_log = false;
	} else if (umac_error_event_table >=
		   mvm->trans->cfg->min_umac_error_event_table) {
		mvm->support_umac_log = true;
	} else {
		IWL_ERR(mvm,
			"Invalid error log pointer 0x%08X for %s uCode\n",
			umac_error_event_table,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
			"Init" : "RT");
		mvm->support_umac_log = false;
	}

	if (mvm->support_umac_log)
		iwl_fw_umac_set_alive_err_table(mvm->trans,
						umac_error_event_table);

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);

	return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}
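
/*
 * Start the requested ucode image on the device and wait for the ALIVE
 * notification; on success, pass the SCD base address to the transport
 * and mark the firmware as running.
 */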
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data = {};
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	bool run_in_rfkill =
		ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	/*
	 * We want to load the INIT firmware even in RFKILL.
	 * For the unified firmware case, the ucode_type is not
	 * INIT, but we still need to run it.
	 */
	ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_22000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_umac_prph(trans,
						   UMAG_SB_CPU_2_STATUS));
			IWL_ERR(mvm, "UMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_UMAC_CURRENT_PC));
			IWL_ERR(mvm, "LMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_LMAC1_CURRENT_PC));
			if (iwl_mvm_is_cdb_supported(mvm))
				IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
					iwl_read_umac_prph(trans,
							   UREG_LMAC2_CURRENT_PC));
		} else if (trans->trans_cfg->device_family >=
			   IWL_DEVICE_FAMILY_8000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		}

		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */
	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	return 0;
}
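
/*
 * Bring up a unified (single image) firmware: load the regular image, send
 * the extended init configuration, complete NVM access and wait for the
 * INIT complete notification.
 */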
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	if (mvm->trans->cfg->tx_with_siso_diversity)
		init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}
	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD),
				   CMD_SEND_IN_RFKILL,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
				      mvm->nvm_sections);
		iwl_mvm_load_nvm_to_nic(mvm);
	}

	if (IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE),
				   CMD_SEND_IN_RFKILL,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && read_nvm) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	mvm->rfkill_safe_init_done = true;

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}
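
/*
 * Send the PHY configuration (valid chains and calibration triggers for
 * the currently running image) to the firmware.
 */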
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;

	if (iwl_mvm_has_unified_ucode(mvm) &&
	    !mvm->trans->cfg->tx_with_siso_diversity)
		return 0;

	if (mvm->trans->cfg->tx_with_siso_diversity) {
		/*
		 * TODO: currently we don't set the antenna; we let the NIC
		 * decide which antenna to use. This should come from the BIOS.
		 */
		phy_cfg_cmd.phy_cfg =
			cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
	}

	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* add the extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |=
		cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
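
/*
 * Run the INIT ucode: load it, read the NVM, send the TX antenna and PHY
 * configuration and wait for the calibration results.
 */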
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, true);

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from an external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%0x, required = 0x%0x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will complete
	 * the init sequence later, when RF kill is switched off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->rfkill_safe_init_done = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->rfkill_safe_init_done = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake one */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
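
/*
 * Configure LTR (PCIe latency tolerance reporting) in the firmware,
 * but only if the transport layer has LTR enabled.
 */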
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
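
/*
 * Select the SAR power profiles for chains A and B and send the per-chain
 * TX power restrictions to the firmware.
 */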
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd;
	u16 len = 0;

	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(struct iwl_dev_tx_power_cmd_v4);
	else
		len = sizeof(cmd.v4.v3);

	if (iwl_sar_select_profile(&mvm->fwrt, cmd.v5.v3.per_chain_restriction,
				   prof_a, prof_b))
		return -ENOENT;

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
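
/* Query the firmware for the currently used SAR geographic offset table */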
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	union geo_tx_power_profiles_cmd geo_tx_cmd;
	u16 len;
	int ret;
	struct iwl_host_cmd cmd;

	if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
		       IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		geo_tx_cmd.geo_cmd.ops =
			cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
		len = sizeof(geo_tx_cmd.geo_cmd);
	} else {
		geo_tx_cmd.geo_cmd_v1.ops =
			cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
		len = sizeof(geo_tx_cmd.geo_cmd_v1);
	}

	if (!iwl_sar_geo_support(&mvm->fwrt))
		return -EOPNOTSUPP;

	cmd = (struct iwl_host_cmd){
		.id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
		.len = { len, },
		.flags = CMD_WANT_SKB,
		.data = { &geo_tx_cmd },
	};

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
	iwl_free_resp(&cmd);

	return ret;
}
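
/* Send the geographic (per-country) SAR offset tables to the firmware */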
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
	union geo_tx_power_profiles_cmd cmd;
	u16 len;

	cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

	iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);

	cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);

	if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
			IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
		len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
	} else {
		len = sizeof(cmd.geo_cmd);
	}

	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
}
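
/*
 * Read the per-platform antenna gain (PPAG) table from ACPI, validate the
 * gain values and store them for later transmission to the firmware.
 */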
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data, *enabled;
	int i, j, ret, tbl_rev;
	int idx = 2;

	mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
	data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);

	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if (tbl_rev != 0) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = &wifi_pkg->package.elements[1];
	if (enabled->type != ACPI_TYPE_INTEGER ||
	    (enabled->integer.value != 0 && enabled->integer.value != 1)) {
		ret = -EINVAL;
		goto out_free;
	}

	mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
	if (!mvm->fwrt.ppag_table.enabled) {
		ret = 0;
		goto out_free;
	}

	/*
	 * Read, verify gain values and save them into the PPAG table.
	 * The first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
	 * following sub-bands to High-Band (5GHz).
	 */
	for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
		for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
			union acpi_object *ent;

			ent = &wifi_pkg->package.elements[idx++];
			if (ent->type != ACPI_TYPE_INTEGER ||
			    (j == 0 && ent->integer.value > ACPI_PPAG_MAX_LB) ||
			    (j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
			    (j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
			    (j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
				mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
				ret = -EINVAL;
				goto out_free;
			}
			mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
		}
	}
	ret = 0;

out_free:
	kfree(data);
	return ret;
}
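
/* Send the per-platform antenna gain table to the firmware, if enabled */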
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	int i, j, ret;

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG capability not supported by FW, command not sent.\n");
		return 0;
	}

	if (!mvm->fwrt.ppag_table.enabled) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG not enabled, command not sent.\n");
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");

	for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
		for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
			IWL_DEBUG_RADIO(mvm,
					"PPAG table: chain[%d] band[%d]: gain = %d\n",
					i, j, mvm->fwrt.ppag_table.gain[i][j]);
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD),
				   0, sizeof(mvm->fwrt.ppag_table),
				   &mvm->fwrt.ppag_table);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
			ret);

	return ret;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_get_ppag_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
		return 0;
	}

	return iwl_mvm_ppag_send_cmd(mvm);
}

#else /* CONFIG_ACPI */

inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
				      int prof_a, int prof_b)
{
	return -ENOENT;
}

inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	return 0;
}

static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
{
	return 0;
}
#endif /* CONFIG_ACPI */
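
/*
 * Send the FW error recovery command; with ERROR_RECOVERY_UPDATE_DB the
 * previously saved error recovery buffer is handed back to the firmware.
 */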
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	int ret;
	u32 resp;

	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buffer was allocated during HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] = error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* the SKB response is only relevant for ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
		if (resp)
			IWL_ERR(mvm,
				"Failed to send recovery cmd: blob was invalid %d\n",
				resp);
	}
}
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_sar_get_wrds_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD.
		 * Return 1 to tell that we can't use WGDS either.
		 */
		return 1;
	}

	ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
	/*
	 * If we don't have profile 0 from BIOS, just skip it. This
	 * means that SAR Geo will not be enabled either, even if we
	 * have other valid profiles.
	 */
	if (ret == -ENOENT)
		return 1;

	return ret;
}
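
/*
 * Load the runtime firmware: either the unified image, or the INIT image
 * first followed by a restart into the regular image.
 */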
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

		if (iwlmvm_mod_params.init_dbg)
			return 0;
		return ret;
	}

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	mvm->rfkill_safe_init_done = false;
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = true;

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}
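
/*
 * Full device bring-up: load the runtime firmware and send all the
 * configuration needed before mac80211 can start using the device.
 */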
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;
	struct ieee80211_supported_band *sband = NULL;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;
	}

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
		ret = iwl_set_soc_latency(mvm);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		ret = iwl_configure_rxq(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
				ret);
			goto error;
		}
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	i = 0;
	while (!sband && i < NUM_NL80211_BANDS)
		sband = mvm->hw->wiphy->bands[i++];

	if (WARN_ON_ONCE(!sband))
		goto error;

	chan = &sband->channels[0];

	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* In order to hand over responsibility for CT-kill and
		 * TX backoff to the FW, we need to send an empty temperature
		 * reporting cmd during init time.
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

#ifdef CONFIG_THERMAL
	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM, the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#endif

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
		WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during CT-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

	if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
		IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");

	ret = iwl_mvm_ppag_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0) {
		ret = iwl_mvm_sar_geo_init(mvm);
	} else if (ret > 0 && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
		/*
		 * If basic SAR is not available, we check for WGDS,
		 * which should *not* be available either. If it is
		 * available, issue an error, because we can't use SAR
		 * Geo without basic SAR.
		 */
		IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
	}

	if (ret < 0)
		goto error;

	iwl_mvm_leds_sync(mvm);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;

 error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}
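
/* Load and configure the WoWLAN (D3) firmware image */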
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
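
/* Log the card state notification: HW/SW RF-kill and CT-kill status */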
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}