drivers/net/wireless/intel/iwlwifi/mvm/fw.c
/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"

#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

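/*
 * Send the RSS configuration to the firmware: enable hashing for the common
 * IPv4/IPv6 TCP/UDP/payload types, fill a random secret key and build an
 * indirection table that spreads traffic over all RX queues except queue 0,
 * which is kept as the fallback queue.  Nothing is sent when only a single
 * RX queue is in use.
 */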
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

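/*
 * Configure the non-default RX queues in the firmware.  The DMA addresses of
 * each queue are retrieved from the transport and passed in a single
 * RFH_QUEUE_CONFIG_CMD.  The default queue (0) is set up through the context
 * info and is therefore skipped here.
 */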
static int iwl_configure_rxq(struct iwl_mvm *mvm)
{
	int i, num_queues, size, ret;
	struct iwl_rfh_queue_config *cmd;
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	/* Do not configure default queue, it is configured via context info */
	num_queues = mvm->trans->num_rx_queues - 1;

	size = struct_size(cmd, data, num_queues);

	cmd = kzalloc(size, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->num_queues = num_queues;

	for (i = 0; i < num_queues; i++) {
		struct iwl_trans_rxq_dma_data data;

		cmd->data[i].q_num = i + 1;
		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);

		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
		cmd->data[i].urbd_stts_wrptr =
			cpu_to_le64(data.urbd_stts_wrptr);
		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
	}

	hcmd.data[0] = cmd;
	hcmd.len[0] = size;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);

	kfree(cmd);

	return ret;
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}

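/*
 * Notification-wait callback for the ALIVE notification.  It parses both the
 * v3 and the CDB (two-LMAC) versions of the response, records the LMAC/UMAC
 * error event table pointers and the SCD base address, and reports whether
 * the firmware came up with IWL_ALIVE_STATUS_OK.
 */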
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_v3 *palive3;
	struct mvm_alive_resp *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_event_table;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);

	if (!umac_error_event_table) {
		mvm->support_umac_log = false;
	} else if (umac_error_event_table >=
		   mvm->trans->cfg->min_umac_error_event_table) {
		mvm->support_umac_log = true;
	} else {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			umac_error_event_table,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
			"Init" : "RT");
		mvm->support_umac_log = false;
	}

	if (mvm->support_umac_log)
		iwl_fw_umac_set_alive_err_table(mvm->trans,
						umac_error_event_table);

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	return true;
}

static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

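/*
 * Load the requested ucode image and block until the ALIVE notification
 * arrives (or MVM_UCODE_ALIVE_TIMEOUT expires).  On failure the previously
 * selected image is restored; on success the queue bookkeeping is reset and
 * IWL_MVM_STATUS_FIRMWARE_RUNNING is set.
 */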
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data = {};
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_umac_prph(trans,
						   UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	return 0;
}

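/*
 * Init flow for devices with a unified (single image) firmware: start the
 * regular ucode, send the extended init configuration, optionally load and
 * parse the NVM, and wait for INIT_COMPLETE_NOTIF.
 */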
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}
	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD), 0,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
				      mvm->nvm_sections);
		iwl_mvm_load_nvm_to_nic(mvm);
	}

	if (IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE), 0,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && read_nvm) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* set extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

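/*
 * Run the INIT ucode on devices with a separate init image: read the NVM,
 * send the TX antenna and PHY configuration, and wait for the calibration
 * results (collected into the PHY DB) to complete.  Unified-firmware devices
 * are redirected to iwl_run_unified_mvm_ucode() instead.
 */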
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, true);

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%0x, required = 0x%0x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init seq later when RF kill will switch to off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
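/*
 * Helpers for reading the SAR and geographic TX power tables (WRDS, EWRD and
 * WGDS ACPI methods) and converting them into the driver's profile
 * structures.
 */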
static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
					  union acpi_object *table,
					  struct iwl_mvm_sar_profile *profile,
					  bool enabled)
{
	int i;

	profile->enabled = enabled;

	for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
		if ((table[i].type != ACPI_TYPE_INTEGER) ||
		    (table[i].integer.value > U8_MAX))
			return -EINVAL;

		profile->table[i] = table[i].integer.value;
	}

	return 0;
}

static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *table, *data;
	bool enabled;
	int ret;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WRDS_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);

	/* position of the actual table */
	table = &wifi_pkg->package.elements[2];

	/* The profile from WRDS is officially profile 1, but goes
	 * into sar_profiles[0] (because we don't have a profile 0).
	 */
	ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
				      enabled);
out_free:
	kfree(data);
	return ret;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	bool enabled;
	int i, n_profiles, pos, ret;

	data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_EWRD_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
	    (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);
	n_profiles = wifi_pkg->package.elements[2].integer.value;

	/*
	 * Check the validity of n_profiles.  The EWRD profiles start
	 * from index 1, so the maximum value allowed here is
	 * ACPI_SAR_PROFILE_NUM - 1.
	 */
	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
		ret = -EINVAL;
		goto out_free;
	}

	/* the tables start at element 3 */
	pos = 3;

	for (i = 0; i < n_profiles; i++) {
		/* The EWRD profiles officially go from 2 to 4, but we
		 * save them in sar_profiles[1-3] (because we don't
		 * have profile 0).  So in the array we start from 1.
		 */
		ret = iwl_mvm_sar_set_profile(mvm,
					      &wifi_pkg->package.elements[pos],
					      &mvm->sar_profiles[i + 1],
					      enabled);
		if (ret < 0)
			break;

		/* go to the next table */
		pos += ACPI_SAR_TABLE_SIZE;
	}

out_free:
	kfree(data);
	return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *data;
	int i, j, ret;
	int idx = 1;

	data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
	if (IS_ERR(data))
		return PTR_ERR(data);

	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
					 ACPI_WGDS_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
			union acpi_object *entry;

			entry = &wifi_pkg->package.elements[idx++];
			if ((entry->type != ACPI_TYPE_INTEGER) ||
			    (entry->integer.value > U8_MAX)) {
				ret = -EINVAL;
				goto out_free;
			}

			mvm->geo_profiles[i].values[j] = entry->integer.value;
		}
	}
	ret = 0;
out_free:
	kfree(data);
	return ret;
}

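/*
 * Send REDUCE_TX_POWER_CMD with the per-chain, per-sub-band limits taken from
 * the selected SAR profiles (one profile per chain).  Profile numbers are
 * 1-based; selecting a disabled or out-of-range profile fails.
 */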
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd;
	int i, j, idx;
	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
	int len;

	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
		     ACPI_SAR_TABLE_SIZE);

	cmd.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v4.v3);

	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
		struct iwl_mvm_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
		if (profs[i] > ACPI_SAR_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &mvm->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
					profs[i]);
			/* if one of the profiles is disabled, we fail all */
			return -ENOENT;
		}

		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
			cmd.v5.v3.per_chain_restriction[i][j] =
				cpu_to_le16(prof->table[idx]);
			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
					j, prof->table[idx]);
		}
	}

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_resp *resp;
	int ret;

	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
	};
	struct iwl_host_cmd cmd = {
		.id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
		.len = { sizeof(geo_cmd), },
		.flags = CMD_WANT_SKB,
		.data = { &geo_cmd },
	};

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);
	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
		/* log the bad index before it is overwritten by the error code */
		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
		ret = -EIO;
	}

	iwl_free_resp(&cmd);
	return ret;
}

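/*
 * Read the WGDS geographic offsets table from ACPI and, if it is available
 * and the firmware is new enough, send it to the firmware with
 * GEO_TX_POWER_LIMIT.  A missing table is not treated as an error.
 */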
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	struct iwl_geo_tx_power_profiles_cmd cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};
	int ret, i, j;
	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

	/*
	 * This command is not supported on earlier firmware versions.
	 * Unfortunately, we don't have a TLV API flag to rely on, so
	 * rely on the major version which is in the first byte of
	 * ucode_ver.
	 */
	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
		return 0;

	ret = iwl_mvm_sar_get_wgds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
		     ACPI_WGDS_TABLE_SIZE + 1 !=  ACPI_WGDS_WIFI_DATA_SIZE);

	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);

	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
		struct iwl_per_chain_offset *chain =
			(struct iwl_per_chain_offset *)&cmd.table[i];

		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
			u8 *value;

			value = &mvm->geo_profiles[i].values[j *
				ACPI_GEO_PER_CHAIN_SIZE];
			chain[j].max_tx_power = cpu_to_le16(value[0]);
			chain[j].chain_a = value[1];
			chain[j].chain_b = value[2];
			IWL_DEBUG_RADIO(mvm,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j, value[1], value[2], value[0]);
		}
	}
	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
			       int prof_b)
{
	return -ENOENT;
}

int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	return -ENOENT;
}
#endif /* CONFIG_ACPI */

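/*
 * Send FW_ERROR_RECOVERY_CMD.  With ERROR_RECOVERY_UPDATE_DB the error
 * recovery buffer collected during the HW reset is attached to the command
 * and the firmware's response status is checked.
 */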
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	int ret;
	u32 resp;

	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated while HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] =  error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd(mvm, &host_cmd);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* the response is only relevant for ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data);
		if (resp)
			IWL_ERR(mvm,
				"Failed to send recovery cmd: blob was invalid %d\n",
				resp);
	}
}

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_sar_get_wrds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD.
		 * Return 1 to tell that we can't use WGDS either.
		 */
		return 1;
	}

	ret = iwl_mvm_sar_get_ewrd_table(mvm);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

	/* choose profile 1 (WRDS) as default for both chains */
	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

	/*
	 * If we don't have profile 0 from BIOS, just skip it.  This
	 * means that SAR Geo will not be enabled either, even if we
	 * have other valid profiles.
	 */
	if (ret == -ENOENT)
		return 1;

	return ret;
}

static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);

		if (iwlmvm_mod_params.init_dbg)
			return 0;
		return ret;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		return ret;

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}

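/*
 * Bring the device fully up: start the hardware and the runtime firmware,
 * then send the initial configuration (TX antennas, PHY DB, BT coex, RX/RSS
 * queues, DQA, aux station, PHY contexts, thermal, LTR, power, MCC, scan and
 * SAR) before handing control back to mac80211.
 */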
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!mvm->trans->ini_valid) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		ret = iwl_configure_rxq(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
				ret);
			goto error;
		}
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	ret = iwl_mvm_send_dqa_cmd(mvm);
	if (ret)
		goto error;

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

	if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid))
		IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n");

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0) {
		ret = iwl_mvm_sar_geo_init(mvm);
	} else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
		/*
		 * If basic SAR is not available, we check for WGDS,
		 * which should *not* be available either.  If it is
		 * available, issue an error, because we can't use SAR
		 * Geo without basic SAR.
		 */
		IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
	}

	if (ret < 0)
		goto error;

	iwl_mvm_leds_sync(mvm);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	if (!iwlmvm_mod_params.init_dbg || !ret)
		iwl_mvm_stop_device(mvm);
	return ret;
}

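/*
 * Load the WoWLAN firmware image used for D3 (suspend): start the hardware,
 * wait for ALIVE and send the minimal configuration (TX antennas, PHY DB,
 * PHY config, aux station) needed while suspended.
 */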
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}