// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START       0x40000
#define IWL_FW_MEM_EXTENDED_END         0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE           352
#define PCI_MEM_DUMP_SIZE       64
#define PCI_PARENT_DUMP_SIZE    524
#define PREFIX_LEN              32
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct pci_dev *pdev = trans_pcie->pci_dev;
        u32 i, pos, alloc_size, *ptr, *buf;
        char *prefix;

        if (trans_pcie->pcie_dbg_dumped_once)
                return;

        /* Should be a multiple of 4 */
        BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
        BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
        BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

        /* Alloc a max size buffer */
        alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
        alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
        alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
        alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

        buf = kmalloc(alloc_size, GFP_ATOMIC);
        if (!buf)
                return;
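        /*
         * The log prefix ("iwlwifi <bdf>: ") is carved out of the last
         * PREFIX_LEN bytes of this same allocation; the dump data never
         * grows into it thanks to the max_t() sizing above.
         */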
        prefix = (char *)buf + alloc_size - PREFIX_LEN;

        IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

        /* Print wifi device registers */
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
        IWL_ERR(trans, "iwlwifi device config registers:\n");
        for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
        for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
                *ptr = iwl_read32(trans, i);
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
                for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
                               32, 4, buf, i, 0);
        }

        /* Print parent device registers next */
        if (!pdev->bus->self)
                goto out;

        pdev = pdev->bus->self;
        sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

        IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
                pci_name(pdev));
        for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
                if (pci_read_config_dword(pdev, i, ptr))
                        goto err_read;
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

        /* Print root port AER registers */
        pos = 0;
        pdev = pcie_find_root_port(pdev);
        if (pdev)
                pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        if (pos) {
                IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
                        pci_name(pdev));
                sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
                for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
                        if (pci_read_config_dword(pdev, pos + i, ptr))
                                goto err_read;
                print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
                               4, buf, i, 0);
        }
        goto out;

err_read:
        print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
        IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
        trans_pcie->pcie_dbg_dumped_once = 1;
        kfree(buf);
}

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
        /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
        usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
        struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

        if (!fw_mon->size)
                return;

        dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
                          fw_mon->physical);

        fw_mon->block = NULL;
        fw_mon->physical = 0;
        fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
                                            u8 max_power, u8 min_power)
{
        struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
        void *block = NULL;
        dma_addr_t physical = 0;
        u32 size = 0;
        u8 power;

        if (fw_mon->size)
                return;

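        /*
         * Try the largest power-of-two size first and halve it on every
         * allocation failure, down to BIT(min_power) bytes.
         */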
        for (power = max_power; power >= min_power; power--) {
                size = BIT(power);
                block = dma_alloc_coherent(trans->dev, size, &physical,
                                           GFP_KERNEL | __GFP_NOWARN);
                if (!block)
                        continue;

                IWL_INFO(trans,
                         "Allocated 0x%08x bytes for firmware monitor.\n",
                         size);
                break;
        }

        if (WARN_ON_ONCE(!block))
                return;

        if (power != max_power)
                IWL_ERR(trans,
                        "Sorry - debug buffer is only %luK while you requested %luK\n",
                        (unsigned long)BIT(power - 10),
                        (unsigned long)BIT(max_power - 10));

        fw_mon->block = block;
        fw_mon->physical = physical;
        fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
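        /*
         * The FW TLV encodes the buffer size as a power-of-two exponent
         * in units of 2 KB (2^11 bytes), hence the +11 below; 26 (64 MB)
         * is both the default and the upper bound.
         */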
        if (!max_power) {
                /* default max_power is maximum */
                max_power = 26;
        } else {
                max_power += 11;
        }

        if (WARN(max_power > 26,
                 "External buffer size for monitor is too big %d, check the FW TLV\n",
                 max_power))
                return;

        if (trans->dbg.fw_mon.size)
                return;

        iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

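/*
 * Indirect access to the shared (SHR) register space: the target address
 * goes in the low 16 bits of the PCIEX control word, and the top nibble
 * selects the operation (2 = read, 3 = write, as used below).
 */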
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (2 << 28)));
        return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
        iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
                    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
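        /*
         * Select the rail feeding APMG: VAUX only when requested and the
         * device can generate PME from D3cold, VMAIN otherwise.
         */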
        if (trans->cfg->apmg_not_supported)
                return;

        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
        else
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
                                       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT   0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
        u16 cap;

        /*
         * L0S states have been found to be unstable with our devices
         * and in newer hardware they are not officially supported at
         * all, so we must always set the L0S_DISABLED bit.
         */
        iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
        trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
        IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
                        (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
                        trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
        int ret;

        IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

        /*
         * Use "set_bit" below rather than "write", to preserve any hardware
         * bits already set by default after reset.
         */

        /* Disable L0S exit timer (platform NMI Work/Around) */
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
                iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                            CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

        /*
         * Disable L0s without affecting L1;
         *  don't wait for ICH L0s (ICH bug W/A)
         */
        iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
                    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

        /* Set FH wait threshold to maximum (HW error during stress W/A) */
        iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

        /*
         * Enable HAP INTA (interrupt from management bus) to
         * wake device's PCI Express link L1a -> L0s
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

        iwl_pcie_apm_config(trans);

        /* Configure analog phase-lock-loop before activating to D0A */
        if (trans->trans_cfg->base_params->pll_cfg)
                iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;

        if (trans->cfg->host_interrupt_operation_mode) {
                /*
                 * This is a bit of an abuse - the workaround below is
                 * needed for 7260 / 3160 only, so we key it off
                 * host_interrupt_operation_mode even though it is not
                 * really related to that flag.
                 *
                 * Enable the oscillator to count wake up time for L1 exit. This
                 * consumes slightly more power (100uA) - but lets us be sure
                 * that we wake up from L1 on time.
                 *
                 * This looks weird: read twice the same register, discard the
                 * value, set a bit, and yet again, read that same register
                 * just to discard the value. But that's the way the hardware
                 * seems to like it.
                 */
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
                iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
                iwl_read_prph(trans, OSC_CLK);
                iwl_read_prph(trans, OSC_CLK);
        }

        /*
         * Enable DMA clock and wait for it to stabilize.
         *
         * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
        if (!trans->cfg->apmg_not_supported) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);

                /* Disable L1-Active */
                iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

                /* Clear the interrupt in APMG if the NIC is in RFKILL */
                iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
                               APMG_RTC_INT_STT_RFKILL);
        }

        set_bit(STATUS_DEVICE_ENABLED, &trans->status);

        return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
        int ret;
        u32 apmg_gp1_reg;
        u32 apmg_xtal_cfg_reg;
        u32 dl_cfg_reg;

        /* Force XTAL ON */
        __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

        iwl_trans_pcie_sw_reset(trans);

        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (WARN_ON(ret)) {
                /* Release XTAL ON request */
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                           CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
                return;
        }

        /*
         * Clear "disable persistence" to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
                            APMG_PCIDEV_STT_VAL_PERSIST_DIS);

        /*
         * Force APMG XTAL to be active to prevent its disabling by HW
         * caused by APMG idle state.
         */
        apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
                                                    SHR_APMG_XTAL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg |
                                 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

        iwl_trans_pcie_sw_reset(trans);

        /* Enable LP XTAL by indirect access through CSR */
        apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
                                 SHR_APMG_GP1_WF_XTAL_LP_EN |
                                 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

        /* Clear delay line clock power up */
        dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
        iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
                                 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

        /*
         * Enable persistence mode to avoid LP XTAL resetting when
         * SHRD_HW_RST is applied in S3.
         */
        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

        /* Activate the XTAL resources monitor */
        __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
                                 CSR_MONITOR_XTAL_RESOURCES);

        /* Release XTAL ON request */
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
        udelay(10);

        /* Release APMG XTAL */
        iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
                                 apmg_xtal_cfg_reg &
                                 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
        int ret;

        /* stop device's busmaster DMA activity */
        iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

        ret = iwl_poll_bit(trans, CSR_RESET,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED,
                           CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
        if (ret < 0)
                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

        IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
        IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

        if (op_mode_leave) {
                if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_pcie_apm_init(trans);

                /* inform ME that we are leaving */
                if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
                else if (trans->trans_cfg->device_family >=
                         IWL_DEVICE_FAMILY_8000) {
                        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
                        mdelay(1);
                        iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                                      CSR_RESET_LINK_PWR_MGMT_DISABLED);
                }
                mdelay(5);
        }

        clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

        /* Stop device's DMA activity */
        iwl_pcie_apm_stop_master(trans);

        if (trans->cfg->lp_xtal_workaround) {
                iwl_pcie_apm_lp_xtal_enable(trans);
                return;
        }

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Clear "initialization complete" bit to move adapter from
         * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
         */
        iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int ret;

        /* nic_init */
        spin_lock(&trans_pcie->irq_lock);
        ret = iwl_pcie_apm_init(trans);
        spin_unlock(&trans_pcie->irq_lock);

        if (ret)
                return ret;

        iwl_pcie_set_pwr(trans, false);

        iwl_op_mode_nic_config(trans->op_mode);

        /* Allocate the RX queue, or reset if it is already allocated */
        iwl_pcie_rx_init(trans);

        /* Allocate or reset and init all Tx and Command queues */
        if (iwl_pcie_tx_init(trans))
                return -ENOMEM;

        if (trans->trans_cfg->base_params->shadow_reg_enable) {
                /* enable shadow regs in HW */
                iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
                IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
        }

        return 0;
}

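/* Timeout for the NIC-ready poll below, in usec (as consumed by iwl_poll_bit()) */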
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
        int ret;

        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        /* See if we got it */
        ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
                           HW_READY_TIMEOUT);

        if (ret >= 0)
                iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
        return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
        int ret;
        int t = 0;
        int iter;

        IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

        ret = iwl_pcie_set_hw_ready(trans);
        /* If the card is ready, exit 0 */
        if (ret >= 0)
                return 0;

        iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
                    CSR_RESET_LINK_PWR_MGMT_DISABLED);
        usleep_range(1000, 2000);

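        /*
         * Retry up to 10 times; each pass asserts PREPARE and then polls
         * for readiness, sleeping 200-1000 usec per try until t, the
         * accumulated minimum sleep time (shared across passes), reaches
         * 150 ms, with a 25 ms pause between passes.
         */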
583                 /* If HW is not ready, prepare the conditions to check again */
584                 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
585                             CSR_HW_IF_CONFIG_REG_PREPARE);
586
587                 do {
588                         ret = iwl_pcie_set_hw_ready(trans);
589                         if (ret >= 0)
590                                 return 0;
591
592                         usleep_range(200, 1000);
593                         t += 200;
594                 } while (t < 150000);
595                 msleep(25);
596         }
597
598         IWL_ERR(trans, "Couldn't prepare the card\n");
599
600         return ret;
601 }

/*
 * ucode
 */
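/*
 * Push one firmware chunk into device SRAM: program the FH service channel
 * (FH_SRVC_CHNL) with the host DMA address, the destination SRAM address
 * and the byte count, then kick the DMA. Completion is signalled by the
 * FH_TX interrupt, which wakes ucode_write_waitq (see the wait in
 * iwl_pcie_load_firmware_chunk() below).
 */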
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
                                            u32 dst_addr, dma_addr_t phy_addr,
                                            u32 byte_cnt)
{
        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

        iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
                    dst_addr);

        iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
                    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

        iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
                    (iwl_get_dma_hi_addr(phy_addr)
                        << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

        iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
                    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
                    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
                    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
                                        u32 dst_addr, dma_addr_t phy_addr,
                                        u32 byte_cnt)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;
        int ret;

        trans_pcie->ucode_write_complete = false;

        if (!iwl_trans_grab_nic_access(trans, &flags))
                return -EIO;

        iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
                                        byte_cnt);
        iwl_trans_release_nic_access(trans, &flags);

        ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
                                 trans_pcie->ucode_write_complete, 5 * HZ);
        if (!ret) {
                IWL_ERR(trans, "Failed to load firmware chunk!\n");
                iwl_trans_pcie_dump_regs(trans);
                return -ETIMEDOUT;
        }

        return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
                                 const struct fw_desc *section)
{
        u8 *v_addr;
        dma_addr_t p_addr;
        u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
        int ret = 0;

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
                     section_num);

        v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
                                    GFP_KERNEL | __GFP_NOWARN);
        if (!v_addr) {
                IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
                chunk_sz = PAGE_SIZE;
                v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
                                            &p_addr, GFP_KERNEL);
                if (!v_addr)
                        return -ENOMEM;
        }

        for (offset = 0; offset < section->len; offset += chunk_sz) {
                u32 copy_size, dst_addr;
                bool extended_addr = false;

                copy_size = min_t(u32, chunk_sz, section->len - offset);
                dst_addr = section->offset + offset;

                if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
                    dst_addr <= IWL_FW_MEM_EXTENDED_END)
                        extended_addr = true;

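                /*
                 * Chunks destined for the extended SRAM range are only
                 * reachable while LMPM_CHICK_EXTENDED_ADDR_SPACE is set,
                 * so bracket the DMA with set/clear of that bit.
                 */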
                if (extended_addr)
                        iwl_set_bits_prph(trans, LMPM_CHICK,
                                          LMPM_CHICK_EXTENDED_ADDR_SPACE);

                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
                                                   copy_size);

                if (extended_addr)
                        iwl_clear_bits_prph(trans, LMPM_CHICK,
                                            LMPM_CHICK_EXTENDED_ADDR_SPACE);

                if (ret) {
                        IWL_ERR(trans,
                                "Could not load the [%d] uCode section\n",
                                section_num);
                        break;
                }
        }

        dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
        return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                                           const struct fw_img *image,
                                           int cpu,
                                           int *first_ucode_section)
{
        int shift_param;
        int i, ret = 0, sec_num = 0x1;
        u32 val, last_read_idx = 0;

        if (cpu == 1) {
                shift_param = 0;
                *first_ucode_section = 0;
        } else {
                shift_param = 16;
                (*first_ucode_section)++;
        }

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
                 * CPU1 sections from the CPU2 sections.
                 * The PAGING_SEPARATOR_SECTION delimiter separates the
                 * CPU2 non-paged sections from the CPU2 paging sections.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;

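                /*
                 * sec_num accumulates a contiguous bitmask of the sections
                 * loaded so far (0x1, 0x3, 0x7, ...); CPU2's mask lives in
                 * the upper half-word, hence shift_param = 16.
                 */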
                /* Notify ucode of loaded section number and status */
                val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
                val = val | (sec_num << shift_param);
                iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

                sec_num = (sec_num << 1) | 0x1;
        }

        *first_ucode_section = last_read_idx;

        iwl_enable_interrupts(trans);

        if (trans->trans_cfg->use_tfh) {
                if (cpu == 1)
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFF);
                else
                        iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
                                       0xFFFFFFFF);
        } else {
                if (cpu == 1)
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFF);
                else
                        iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
                                           0xFFFFFFFF);
        }

        return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                                      const struct fw_img *image,
                                      int cpu,
                                      int *first_ucode_section)
{
        int i, ret = 0;
        u32 last_read_idx = 0;

        if (cpu == 1)
                *first_ucode_section = 0;
        else
                (*first_ucode_section)++;

        for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;

                /*
                 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
                 * CPU1 sections from the CPU2 sections.
                 * The PAGING_SEPARATOR_SECTION delimiter separates the
                 * CPU2 non-paged sections from the CPU2 paging sections.
                 */
                if (!image->sec[i].data ||
                    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
                    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
                        IWL_DEBUG_FW(trans,
                                     "Break since Data not valid or Empty section, sec = %d\n",
                                     i);
                        break;
                }

                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }

        *first_ucode_section = last_read_idx;

        return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
        enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
        struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
                &trans->dbg.fw_mon_cfg[alloc_id];
        struct iwl_dram_data *frag;

        if (!iwl_trans_dbg_ini_valid(trans))
                return;

        if (le32_to_cpu(fw_mon_cfg->buf_location) ==
            IWL_FW_INI_LOCATION_SRAM_PATH) {
                IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
                /* set sram monitor by enabling bit 7 */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

                return;
        }

        if (le32_to_cpu(fw_mon_cfg->buf_location) !=
            IWL_FW_INI_LOCATION_DRAM_PATH ||
            !trans->dbg.fw_mon_ini[alloc_id].num_frags)
                return;

        frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

        IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
                     alloc_id);

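        /*
         * Base/end are programmed in units of 2^MON_BUFF_SHIFT_VER2 bytes;
         * the end address is pulled back by 256 bytes, matching the legacy
         * EXTERNAL_MODE programming for 8000+ devices further down.
         */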
        iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
                            frag->physical >> MON_BUFF_SHIFT_VER2);
        iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
                            (frag->physical + frag->size - 256) >>
                            MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
        const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
        const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
        int i;

        if (iwl_trans_dbg_ini_valid(trans)) {
                iwl_pcie_apply_destination_ini(trans);
                return;
        }

        IWL_INFO(trans, "Applying debug destination %s\n",
                 get_fw_dbg_mode_string(dest->monitor_mode));

        if (dest->monitor_mode == EXTERNAL_MODE)
                iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
        else
                IWL_WARN(trans, "PCI should have external buffer debug\n");

        for (i = 0; i < trans->dbg.n_dest_reg; i++) {
                u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
                u32 val = le32_to_cpu(dest->reg_ops[i].val);

                switch (dest->reg_ops[i].op) {
                case CSR_ASSIGN:
                        iwl_write32(trans, addr, val);
                        break;
                case CSR_SETBIT:
                        iwl_set_bit(trans, addr, BIT(val));
                        break;
                case CSR_CLEARBIT:
                        iwl_clear_bit(trans, addr, BIT(val));
                        break;
                case PRPH_ASSIGN:
                        iwl_write_prph(trans, addr, val);
                        break;
                case PRPH_SETBIT:
                        iwl_set_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
                case PRPH_BLOCKBIT:
                        if (iwl_read_prph(trans, addr) & BIT(val)) {
                                IWL_ERR(trans,
                                        "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
                                        val, addr);
                                goto monitor;
                        }
                        break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
                        break;
                }
        }

monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               fw_mon->physical >> dest->base_shift);
                if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (fw_mon->physical + fw_mon->size -
                                        256) >> dest->end_shift);
                else
                        iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
                                       (fw_mon->physical + fw_mon->size) >>
                                       dest->end_shift);
        }
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                     const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        /* load to FW the binary non secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
        if (ret)
                return ret;

        if (image->is_dual_cpus) {
                /* set CPU2 header address */
                iwl_write_prph(trans,
                               LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
                               LMPM_SECURE_CPU2_HDR_MEM_SPACE);

                /* load to FW the binary sections of CPU2 */
                ret = iwl_pcie_load_cpu_sections(trans, image, 2,
                                                 &first_ucode_section);
                if (ret)
                        return ret;
        }

        if (iwl_pcie_dbg_on(trans))
                iwl_pcie_apply_destination(trans);

        iwl_enable_interrupts(trans);

        /* release CPU reset */
        iwl_write32(trans, CSR_RESET, 0);

        return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                                          const struct fw_img *image)
{
        int ret = 0;
        int first_ucode_section;

        IWL_DEBUG_FW(trans, "working with %s CPU\n",
                     image->is_dual_cpus ? "Dual" : "Single");

        if (iwl_pcie_dbg_on(trans))
                iwl_pcie_apply_destination(trans);

        IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
                        iwl_read_prph(trans, WFPM_GP2));

        /*
         * Set default value. On resume, reading back the values that were
         * zeroed can provide debug data on the resume flow.
         * This is for debugging only and has no functional impact.
         */
        iwl_write_prph(trans, WFPM_GP2, 0x01010101);

        /* configure the ucode to be ready to get the secured image */
        /* release CPU reset */
        iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

        /* load to FW the binary Secured sections of CPU1 */
        ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
                                              &first_ucode_section);
        if (ret)
                return ret;

        /* load to FW the binary sections of CPU2 */
        return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
                                               &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill = iwl_is_rfkill_set(trans);
        bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
        bool report;

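        /*
         * STATUS_RFKILL_OPMODE is sticky: when HW rfkill is released, it
         * is only cleared here once the op mode is already down.
         */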
        if (hw_rfkill) {
                set_bit(STATUS_RFKILL_HW, &trans->status);
                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
        } else {
                clear_bit(STATUS_RFKILL_HW, &trans->status);
                if (trans_pcie->opmode_down)
                        clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
        }

        report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

        if (prev != report)
                iwl_trans_pcie_rf_kill(trans, report);

        return hw_rfkill;
}

struct iwl_causes_list {
        u32 cause_num;
        u32 mask_reg;
        u8 addr;
};

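/*
 * Each entry maps one interrupt cause to its byte offset (addr) in the
 * MSI-X IVAR table and names the mask register used to unmask it.
 */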
static struct iwl_causes_list causes_list[] = {
        {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
        {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
        {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
        {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
        {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
        {MSIX_HW_INT_CAUSES_REG_WAKEUP,         CSR_MSIX_HW_INT_MASK_AD, 0x11},
        {MSIX_HW_INT_CAUSES_REG_RESET_DONE,     CSR_MSIX_HW_INT_MASK_AD, 0x12},
        {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
        {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
        {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
        {MSIX_HW_INT_CAUSES_REG_SW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x29},
        {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
        {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
        {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
        {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
        int i, arr_size = ARRAY_SIZE(causes_list);
        struct iwl_causes_list *causes = causes_list;

        /*
         * Access all non RX causes and map them to the default irq.
         * In case we are missing at least one interrupt vector,
         * the first interrupt vector will serve non-RX and FBQ causes.
         */
        for (i = 0; i < arr_size; i++) {
                iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
                iwl_clear_bit(trans, causes[i].mask_reg,
                              causes[i].cause_num);
        }
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 offset =
                trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
        u32 val, idx;

        /*
         * The first RX queue - the fallback queue, which is designated for
         * management frames, command responses, etc. - is always mapped to
         * the first interrupt vector. The other RX queues are mapped to
         * the other (N - 2) interrupt vectors.
         */
        val = BIT(MSIX_FH_INT_CAUSES_Q(0));
        for (idx = 1; idx < trans->num_rx_queues; idx++) {
                iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
                           MSIX_FH_INT_CAUSES_Q(idx - offset));
                val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
        }
        iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

        val = MSIX_FH_INT_CAUSES_Q(0);
        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
                val |= MSIX_NON_AUTO_CLEAR_CAUSE;
        iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

        if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
                iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        if (!trans_pcie->msix_enabled) {
                if (trans->trans_cfg->mq_rx_supported &&
                    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                        iwl_write_umac_prph(trans, UREG_CHICK,
                                            UREG_CHICK_MSI_ENABLE);
                return;
        }
        /*
         * The IVAR table needs to be configured again after reset,
         * but if the device is disabled, we can't write to
         * prph.
         */
        if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
                iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

        /*
         * Each cause from the causes list above and the RX causes is
         * represented as a byte in the IVAR table. The first nibble
         * represents the bound interrupt vector of the cause, the second
         * represents no auto clear for this cause. This will be set if its
         * interrupt vector is bound to serve other causes.
         */
        iwl_pcie_map_rx_causes(trans);

        iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
        struct iwl_trans *trans = trans_pcie->trans;

        iwl_pcie_conf_msix_hw(trans_pcie);

        if (!trans_pcie->msix_enabled)
                return;

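        /*
         * Cache the set of causes left unmasked by the IVAR programming
         * above; the registers hold masked bits, hence the negation.
         */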
        trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
        trans_pcie->fh_mask = trans_pcie->fh_init_mask;
        trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
        trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        lockdep_assert_held(&trans_pcie->mutex);

        if (trans_pcie->is_down)
                return;

        trans_pcie->is_down = true;

        /* tell the device to stop sending interrupts */
        iwl_disable_interrupts(trans);

        /* device going down, stop using ICT table */
        iwl_pcie_disable_ict(trans);

        /*
         * If a HW restart happens during firmware loading,
         * then the firmware loading might call this function
         * and later it might be called again due to the
         * restart. So don't process again if the device is
         * already dead.
         */
        if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
                IWL_DEBUG_INFO(trans,
                               "DEVICE_ENABLED bit was set and is now cleared\n");
                iwl_pcie_tx_stop(trans);
                iwl_pcie_rx_stop(trans);

                /* Power-down device's busmaster DMA clocks */
                if (!trans->cfg->apmg_not_supported) {
                        iwl_write_prph(trans, APMG_CLK_DIS_REG,
                                       APMG_CLK_VAL_DMA_CLK_RQT);
                        udelay(5);
                }
        }

        /* Make sure (redundant) we've released our request to stay awake */
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwl_pcie_apm_stop(trans, false);

        iwl_trans_pcie_sw_reset(trans);

        /*
         * Upon stop, the IVAR table gets erased, so msi-x won't
         * work. This causes a bug in RF-KILL flows, since the interrupt
         * that enables radio won't fire on the correct irq, and the
         * driver won't be able to handle the interrupt.
         * Configure the IVAR table again after reset.
         */
        iwl_pcie_conf_msix_hw(trans_pcie);

        /*
         * Upon stop, the APM issues an interrupt if HW RF kill is set.
         * This is a bug in certain versions of the hardware.
         * Certain devices also keep sending HW RF kill interrupt all
         * the time, unless the interrupt is ACKed even if the interrupt
         * should be masked. Re-ACK all the interrupts here.
         */
        iwl_disable_interrupts(trans);

        /* clear all status bits */
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        clear_bit(STATUS_INT_ENABLED, &trans->status);
        clear_bit(STATUS_TPOWER_PMI, &trans->status);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwl_enable_rfkill_int(trans);

        /* re-take ownership to prevent other users from stealing the device */
        iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->msix_enabled) {
                int i;

                for (i = 0; i < trans_pcie->alloc_vecs; i++)
                        synchronize_irq(trans_pcie->msix_entries[i].vector);
        } else {
                synchronize_irq(trans_pcie->pci_dev->irq);
        }
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int ret;

        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
                /* return directly: "goto out" would unlock an unheld mutex */
                return -EIO;
        }

        iwl_enable_rfkill_int(trans);

        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        /*
         * We enabled the RF-Kill interrupt and the handler may very
         * well be running. Disable the interrupts to make sure no other
         * interrupt can be fired.
         */
        iwl_disable_interrupts(trans);

        /* Make sure it finished running */
        iwl_pcie_synchronize_irqs(trans);

        mutex_lock(&trans_pcie->mutex);

        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill) {
                ret = -ERFKILL;
                goto out;
        }

        /* Someone called stop_device, don't try to start_fw */
        if (trans_pcie->is_down) {
                IWL_WARN(trans,
                         "Can't start_fw since the HW hasn't been started\n");
                ret = -EIO;
                goto out;
        }

        /* make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
                    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

        /* clear (again), then enable host interrupts */
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
                goto out;
        }

        /*
         * Now, we load the firmware and don't want to be interrupted, even
         * by the RF-Kill interrupt (hence mask all the interrupts besides the
         * FH_TX interrupt which is needed to load the firmware). If the
         * RF-Kill switch is toggled, we will find out after having loaded
         * the firmware and return the proper value to the caller.
         */
        iwl_enable_fw_load_int(trans);

        /* really make sure rfkill handshake bits are cleared */
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
        iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

        /* Load the given image to the HW */
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
                ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
                ret = iwl_pcie_load_given_ucode(trans, fw);

        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
                ret = -ERFKILL;

out:
        mutex_unlock(&trans_pcie->mutex);
        return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
        iwl_pcie_reset_ict(trans);
        iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
                                       bool was_in_rfkill)
{
        bool hw_rfkill;

        /*
         * Check again since the RF kill state may have changed while
         * all the interrupts were disabled, in this case we couldn't
         * receive the RF kill interrupt and update the state in the
         * op_mode.
         * Don't call the op_mode if the rfkill state hasn't changed.
         * This allows the op_mode to call stop_device from the rfkill
         * notification without endless recursion. Under very rare
         * circumstances, we might have a small recursion if the rfkill
         * state changed exactly now while we were called from stop_device.
         * This is very unlikely but can happen and is supported.
         */
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill) {
                set_bit(STATUS_RFKILL_HW, &trans->status);
                set_bit(STATUS_RFKILL_OPMODE, &trans->status);
        } else {
                clear_bit(STATUS_RFKILL_HW, &trans->status);
                clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
        }
        if (hw_rfkill != was_in_rfkill)
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
1373
1374 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1375 {
1376         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1377         bool was_in_rfkill;
1378
1379         mutex_lock(&trans_pcie->mutex);
1380         trans_pcie->opmode_down = true;
1381         was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1382         _iwl_trans_pcie_stop_device(trans);
1383         iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
1384         mutex_unlock(&trans_pcie->mutex);
1385 }
1386
1387 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1388 {
1389         struct iwl_trans_pcie __maybe_unused *trans_pcie =
1390                 IWL_TRANS_GET_PCIE_TRANS(trans);
1391
1392         lockdep_assert_held(&trans_pcie->mutex);
1393
1394         IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1395                  state ? "disabled" : "enabled");
1396         if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
1397                 if (trans->trans_cfg->gen2)
1398                         _iwl_trans_pcie_gen2_stop_device(trans);
1399                 else
1400                         _iwl_trans_pcie_stop_device(trans);
1401         }
1402 }
1403
1404 void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
1405                                   bool test, bool reset)
1406 {
1407         iwl_disable_interrupts(trans);
1408
1409         /*
1410          * in testing mode, the host stays awake and the
1411          * hardware won't be reset (not even partially)
1412          */
1413         if (test)
1414                 return;
1415
1416         iwl_pcie_disable_ict(trans);
1417
1418         iwl_pcie_synchronize_irqs(trans);
1419
1420         iwl_clear_bit(trans, CSR_GP_CNTRL,
1421                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1422         iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1423
1424         if (reset) {
1425                 /*
1426                  * reset TX queues -- some of their registers reset during S3
1427                  * so if we don't reset everything here the D3 image would try
1428                  * to execute some invalid memory upon resume
1429                  */
1430                 iwl_trans_pcie_tx_reset(trans);
1431         }
1432
1433         iwl_pcie_set_pwr(trans, true);
1434 }
1435
1436 static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1437                                      bool reset)
1438 {
1439         int ret;
1440         struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1441
1442         if (!reset)
1443                 /* Enable persistence mode to avoid reset */
1444                 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1445                             CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
1446
1447         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1448                 iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1449                                     UREG_DOORBELL_TO_ISR6_SUSPEND);
1450
1451                 ret = wait_event_timeout(trans_pcie->sx_waitq,
1452                                          trans_pcie->sx_complete, 2 * HZ);
		/* invalidate it ahead of the coming resume */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}
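
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * other half of the sx_complete handshake used above and in d3_resume()
 * below. When the firmware acknowledges the suspend/resume doorbell, the
 * interrupt path is expected to do roughly this, which makes the
 * wait_event_timeout() call return non-zero (remaining jiffies) instead
 * of 0 (timeout).
 */
#if 0
static void example_sx_handshake_ack(struct iwl_trans_pcie *trans_pcie)
{
	trans_pcie->sx_complete = true;
	wake_up(&trans_pcie->sx_waitq);
}
#endif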

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure the IVAR table in MSI-X mode, or reset the ICT table
	 * in MSI mode, since the HW reset erased them.
	 * This also enables interrupts - none will fire yet, since the
	 * device doesn't know we're waking it up; they only start once the
	 * opmode actually tells it so after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/* invalidate it ahead of the next suspend */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable MSI-X mode (ret %d). Falling back to MSI mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt fewer: non-RX causes are shared with the FBQ vector.
	 * Two interrupts fewer: non-RX causes are shared with the FBQ and
	 * RSS vectors.
	 * More than two interrupts fewer: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}
	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
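
/*
 * Illustrative sketch (not driver code) of the vector-sharing rules
 * documented above, as a stand-alone mapping from allocated vectors to RX
 * queues. For example, with max_irqs == 6: 6 vectors give 5 RX queues and
 * a dedicated non-RX vector; 5 give 5 RX queues with non-RX causes sharing
 * vector 0; 4 give 5 RX queues with non-RX causes and the first RSS queue
 * both sharing vector 0.
 */
#if 0
static int example_num_rx_queues(int num_irqs, int max_irqs)
{
	if (num_irqs <= max_irqs - 2)
		return num_irqs + 1;	/* non-RX + first RSS share vector 0 */
	if (num_irqs == max_irqs - 1)
		return num_irqs;	/* non-RX causes share vector 0 */
	return num_irqs - 1;		/* last vector dedicated to non-RX */
}
#endif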

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q; i++) {
		/*
		 * Pass the CPU just before the one we want, since
		 * cpumask_next() returns the first set CPU strictly
		 * greater than its argument (i.e. > i - offset here).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}
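
/*
 * Illustrative sketch (not driver code): since i - offset increments by one
 * per loop iteration above, the affinity loop walks the online CPUs in
 * order, one RX vector per CPU. A stand-alone equivalent that feeds the
 * previous result back into cpumask_next():
 */
#if 0
static void example_spread_vectors(int num_vecs)
{
	int cpu = -1, vec;

	for (vec = 0; vec < num_vecs; vec++) {
		/* first set CPU strictly greater than 'cpu' */
		cpu = cpumask_next(cpu, cpu_online_mask);
		pr_info("vector %d -> CPU %d\n", vec, cpu);
	}
}
#endif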

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, cannot clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	iwl_trans_pcie_sw_reset(trans);

	return 0;
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	iwl_trans_pcie_sw_reset(trans);

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that... */
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
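
/*
 * Illustrative sketch (not driver code): periphery (PRPH) registers are
 * reached indirectly through the address/data register pairs above. The
 * target address is masked to the family-dependent window (20 or 24 bits,
 * see iwl_trans_pcie_prph_msk()); the (3 << 24) in the top bits is assumed
 * here to request a 4-byte access, e.g.:
 */
#if 0
static u32 example_prph_cmd(u32 reg, u32 mask)
{
	/* reg = 0xA0123C, mask = 0x00FFFFFF -> 0x03A0123C:
	 * a dword access at PRPH offset 0xA0123C */
	return (reg & mask) | (3 << 24);
}
#endif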

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	trans->txqs.page_offs = trans_cfg->cb_data_offs;
	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);

	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_fw_monitor(trans);

	if (trans_pcie->pnvm_dram.size)
		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
				  trans_pcie->pnvm_dram.block,
				  trans_pcie->pnvm_dram.physical);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;
	struct work_struct work;
};

static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

	dev_err(&pdev->dev, "Device gone - attempting removal\n");
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
	pci_lock_rescan_remove();
	pci_dev_put(pdev);
	pci_stop_and_remove_bus_device(pdev);
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}

static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile
	 * SRAM, and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

		WARN_ONCE(1,
			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
			  cntrl);

		iwl_trans_pcie_dump_regs(trans);

		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
			struct iwl_trans_pcie_removal *removal;

			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
				goto err;

			IWL_ERR(trans, "Device gone - scheduling removal!\n");

			/*
			 * Take a module reference so that this can't run
			 * while the module is being unloaded, and so we
			 * never schedule work whose code is being removed.
			 */
			if (!try_module_get(THIS_MODULE)) {
				IWL_ERR(trans,
					"Module is being unloaded - abort\n");
				goto err;
			}

			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
			if (!removal) {
				module_put(THIS_MODULE);
				goto err;
			}
			/*
			 * we don't need to clear this flag, because
			 * the trans will be freed and reallocated.
			 */
			set_bit(STATUS_TRANS_DEAD, &trans->status);

			removal->pdev = to_pci_dev(trans->dev);
			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
			pci_dev_get(removal->pdev);
			schedule_work(&removal->work);
		} else {
			iwl_write32(trans, CSR_RESET,
				    CSR_RESET_REG_FLAG_FORCE_NMI);
		}

err:
		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
		return false;
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
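
/*
 * Illustrative usage sketch (not driver code) of the grab/release pair
 * above: device registers may only be touched while the NIC is held awake
 * between the two calls. Real callers go through the
 * iwl_trans_grab_nic_access()/iwl_trans_release_nic_access() wrappers, as
 * the memory accessors below do.
 */
#if 0
static u32 example_read_while_awake(struct iwl_trans *trans, u32 reg)
{
	unsigned long flags;
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		val = iwl_read32(trans, reg);
		iwl_trans_release_nic_access(trans, &flags);
	}
	return val;
}
#endif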

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs = 0;
	u32 *vals = buf;

	while (offs < dwords) {
		/* limit the time we spin here under lock to 1/2s */
		unsigned long end = jiffies + HZ / 2;
		bool resched = false;

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
				    addr + 4 * offs);

			while (offs < dwords) {
				vals[offs] = iwl_read32(trans,
							HBUS_TARG_MEM_RDAT);
				offs++;

				if (time_after(jiffies, end)) {
					resched = true;
					break;
				}
			}
			iwl_trans_release_nic_access(trans, &flags);

			if (resched)
				cond_resched();
		} else {
			return -EBUSY;
		}
	}

	return 0;
}
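
/*
 * Illustrative sketch (hypothetical address, not driver code): a typical
 * caller reads device SRAM through the iwl_trans_read_mem() wrapper. Note
 * the HZ/2 cap above only bounds how long the lock is held per chunk; the
 * loop resumes where it left off, so large reads still complete.
 */
#if 0
static int example_dump_sram(struct iwl_trans *trans)
{
	u32 buf[64];

	/* read 64 dwords starting at a hypothetical SRAM address */
	return iwl_trans_read_mem(trans, 0x800000, buf, ARRAY_SIZE(buf));
}
#endif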

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
					u32 *val)
{
	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
				     ofs, val);
}

static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (i == trans->txqs.cmd.q_id)
			continue;

		spin_lock_bh(&txq->lock);

		if (!block && !(WARN_ON_ONCE(!txq->block))) {
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
		}

		spin_unlock_bh(&txq->lock);
	}
}

#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
				       struct iwl_trans_rxq_dma_data *data)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
		return -EINVAL;

	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
	data->fr_bd_wid = 0;

	return 0;
}

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
	struct iwl_txq *txq;
	unsigned long now = jiffies;
	bool overflow_tx;
	u8 wr_ptr;

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!test_bit(txq_idx, trans->txqs.queue_used))
		return -EINVAL;

	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
	txq = trans->txqs.txq[txq_idx];

	spin_lock_bh(&txq->lock);
	overflow_tx = txq->overflow_tx ||
		      !skb_queue_empty(&txq->overflow_q);
	spin_unlock_bh(&txq->lock);

	wr_ptr = READ_ONCE(txq->write_ptr);

	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
		overflow_tx) &&
	       !time_after(jiffies,
			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
		u8 write_ptr = READ_ONCE(txq->write_ptr);

		/*
		 * If the write pointer moved during the wait, warn only
		 * if the TX came from op mode. In case TX came from
		 * trans layer (overflow TX) don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}
	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"failed to flush all Tx FIFO queues, Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}

static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}
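
/*
 * Illustrative note: each IWL_CMD(x) line above expands to a stringifying
 * case label, so e.g. IWL_CMD(CSR_RESET) becomes:
 */
#if 0
	case CSR_RESET: return "CSR_RESET";
#endif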

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0x%08X\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	debugfs_create_file(#name, mode, parent, trans,			\
			    &iwl_dbgfs_##name##_ops);			\
} while (0)

/* file operations */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
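
/*
 * Illustrative expansion (not additional code):
 * DEBUGFS_READ_WRITE_FILE_OPS(interrupt), instantiated near the end of
 * this file, expands to a file_operations wiring up the matching
 * _read/_write handlers defined below:
 */
#if 0
static const struct file_operations iwl_dbgfs_interrupt_ops = {
	.write = iwl_dbgfs_interrupt_write,
	.read = iwl_dbgfs_interrupt_read,
	.open = simple_open,
	.llseek = generic_file_llseek,
};
#endif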

struct iwl_dbgfs_tx_queue_priv {
	struct iwl_trans *trans;
};

struct iwl_dbgfs_tx_queue_state {
	loff_t pos;
};

static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->pos = *pos;
	return state;
}

static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
					 void *v, loff_t *pos)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;

	*pos = ++state->pos;

	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
		return NULL;

	return state;
}

static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
{
	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
	struct iwl_dbgfs_tx_queue_state *state = v;
	struct iwl_trans *trans = priv->trans;
	struct iwl_txq *txq = trans->txqs.txq[state->pos];

	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
		   (unsigned int)state->pos,
		   !!test_bit(state->pos, trans->txqs.queue_used),
		   !!test_bit(state->pos, trans->txqs.queue_stopped));
	if (txq)
		seq_printf(seq,
			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
			   txq->read_ptr, txq->write_ptr,
			   txq->need_update, txq->frozen,
			   txq->n_window, txq->ampdu);
	else
		seq_puts(seq, "(unallocated)");

	if (state->pos == trans->txqs.cmd.q_id)
		seq_puts(seq, " (HCMD)");
	seq_puts(seq, "\n");

	return 0;
}

static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
	.start = iwl_dbgfs_tx_queue_seq_start,
	.next = iwl_dbgfs_tx_queue_seq_next,
	.stop = iwl_dbgfs_tx_queue_seq_stop,
	.show = iwl_dbgfs_tx_queue_seq_show,
};

static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
{
	struct iwl_dbgfs_tx_queue_priv *priv;

	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
				  sizeof(*priv));

	if (!priv)
		return -ENOMEM;

	priv->trans = inode->i_private;
	return 0;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
								     rxq));
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n",
					 r & 0x0FFF);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 reset_flag;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
	if (ret)
		return ret;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool new_value;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &new_value);
	if (ret)
		return ret;
	if (new_value == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 trans_pcie->debug_rfkill, new_value);
	trans_pcie->debug_rfkill = new_value;
	iwl_pcie_handle_rfkill_irq(trans);

	return count;
}

static int iwl_dbgfs_monitor_data_open(struct inode *inode,
				       struct file *file)
{
	struct iwl_trans *trans = inode->i_private;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->dbg.dest_tlv ||
	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
		return -ENOENT;
	}

	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;

	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return simple_open(inode, file);
}

static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}

static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	int buf_size_left = count - *bytes_copied;

	/* round the remaining space down to a whole number of dwords */
	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}
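
/*
 * Worked example (illustrative): with count == 10 and *bytes_copied == 0,
 * buf_size_left is rounded down from 10 to 8, so at most two whole dwords
 * are copied out; if *size was >= 8, the function returns true ("user
 * buffer full") and the caller stops after this chunk.
 */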

static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state ==
	    IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DW */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}
2825
2826 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2827 DEBUGFS_READ_FILE_OPS(fh_reg);
2828 DEBUGFS_READ_FILE_OPS(rx_queue);
2829 DEBUGFS_WRITE_FILE_OPS(csr);
2830 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2831 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
2832         .owner = THIS_MODULE,
2833         .open = iwl_dbgfs_tx_queue_open,
2834         .read = seq_read,
2835         .llseek = seq_lseek,
2836         .release = seq_release_private,
2837 };
2838
2839 static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2840         .read = iwl_dbgfs_monitor_data_read,
2841         .open = iwl_dbgfs_monitor_data_open,
2842         .release = iwl_dbgfs_monitor_data_release,
2843 };
2844
2845 /* Create the debugfs files and directories */
2846 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2847 {
2848         struct dentry *dir = trans->dbgfs_dir;
2849
2850         DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2851         DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2852         DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2853         DEBUGFS_ADD_FILE(csr, dir, 0200);
2854         DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2855         DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2856         DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2857 }
2858
2859 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2860 {
2861         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2862         struct cont_rec *data = &trans_pcie->fw_mon_data;
2863
2864         mutex_lock(&data->mutex);
2865         data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2866         mutex_unlock(&data->mutex);
2867 }
2868 #endif /* CONFIG_IWLWIFI_DEBUGFS */
2869
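/* Sum the lengths of all TBs in a TFD to get the total command length */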
2870 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2871 {
2872         u32 cmdlen = 0;
2873         int i;
2874
2875         for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
2876                 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
2877
2878         return cmdlen;
2879 }
2880
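/* Dump the RBs between the RX queue's read pointer and the closed-RB
 * status. Each RB page is unmapped for CPU access, copied into the dump
 * and then remapped for the device. Returns the number of bytes added to
 * the dump.
 */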
2881 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2882                                    struct iwl_fw_error_dump_data **data,
2883                                    int allocated_rb_nums)
2884 {
2885         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2886         int max_len = trans_pcie->rx_buf_bytes;
2887         /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
2888         struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2889         u32 i, r, j, rb_len = 0;
2890
2891         spin_lock(&rxq->lock);
2892
2893         r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2894
2895         for (i = rxq->read, j = 0;
2896              i != r && j < allocated_rb_nums;
2897              i = (i + 1) & RX_QUEUE_MASK, j++) {
2898                 struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
2899                 struct iwl_fw_error_dump_rb *rb;
2900
2901                 dma_unmap_page(trans->dev, rxb->page_dma, max_len,
2902                                DMA_FROM_DEVICE);
2903
2904                 rb_len += sizeof(**data) + sizeof(*rb) + max_len;
2905
2906                 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
2907                 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
2908                 rb = (void *)(*data)->data;
2909                 rb->index = cpu_to_le32(i);
2910                 memcpy(rb->data, page_address(rxb->page), max_len);
2911                 /* remap the page for the benefit of the free path */
2912                 rxb->page_dma = dma_map_page(trans->dev, rxb->page,
2913                                              rxb->offset, max_len,
2914                                              DMA_FROM_DEVICE);
2915
2916                 *data = iwl_fw_error_next_data(*data);
2917         }
2918
2919         spin_unlock(&rxq->lock);
2920
2921         return rb_len;
2922 }
2923 #define IWL_CSR_TO_DUMP (0x250)
2924
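/* Dump the first IWL_CSR_TO_DUMP bytes of CSR register space */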
2925 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2926                                    struct iwl_fw_error_dump_data **data)
2927 {
2928         u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2929         __le32 *val;
2930         int i;
2931
2932         (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2933         (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2934         val = (void *)(*data)->data;
2935
2936         for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2937                 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2938
2939         *data = iwl_fw_error_next_data(*data);
2940
2941         return csr_len;
2942 }
2943
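/* Dump the FH register range; gen2 devices expose it through the UMAC
 * PRPH window, older devices through direct mapped reads.
 */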
2944 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2945                                        struct iwl_fw_error_dump_data **data)
2946 {
2947         u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2948         unsigned long flags;
2949         __le32 *val;
2950         int i;
2951
2952         if (!iwl_trans_grab_nic_access(trans, &flags))
2953                 return 0;
2954
2955         (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2956         (*data)->len = cpu_to_le32(fh_regs_len);
2957         val = (void *)(*data)->data;
2958
2959         if (!trans->trans_cfg->gen2)
2960                 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
2961                      i += sizeof(u32))
2962                         *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2963         else
2964                 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
2965                      i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
2966                      i += sizeof(u32))
2967                         *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
2968                                                                       i));
2969
2970         iwl_trans_release_nic_access(trans, &flags);
2971
2972         *data = iwl_fw_error_next_data(*data);
2973
2974         return sizeof(**data) + fh_regs_len;
2975 }
2976
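/* Read the MARBH monitor out through the DMARB read-control/read-data
 * register pair, one DW at a time, under NIC access.
 */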
2977 static u32
2978 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
2979                                  struct iwl_fw_error_dump_fw_mon *fw_mon_data,
2980                                  u32 monitor_len)
2981 {
2982         u32 buf_size_in_dwords = (monitor_len >> 2);
2983         u32 *buffer = (u32 *)fw_mon_data->data;
2984         unsigned long flags;
2985         u32 i;
2986
2987         if (!iwl_trans_grab_nic_access(trans, &flags))
2988                 return 0;
2989
2990         iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
2991         for (i = 0; i < buf_size_in_dwords; i++)
2992                 buffer[i] = iwl_read_umac_prph_no_grab(trans,
2993                                                        MON_DMARB_RD_DATA_ADDR);
2994         iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
2995
2996         iwl_trans_release_nic_access(trans, &flags);
2997
2998         return monitor_len;
2999 }
3000
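/* Fill in the monitor's base/write-pointer/cycle-count fields, picking
 * the register set by device family (AX210+, dest TLV, or legacy).
 */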
3001 static void
3002 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3003                              struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3004 {
3005         u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3006
3007         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3008                 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3009                 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3010                 write_ptr = DBGC_CUR_DBGBUF_STATUS;
3011                 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3012         } else if (trans->dbg.dest_tlv) {
3013                 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3014                 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3015                 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3016         } else {
3017                 base = MON_BUFF_BASE_ADDR;
3018                 write_ptr = MON_BUFF_WRPTR;
3019                 wrap_cnt = MON_BUFF_CYCLE_CNT;
3020         }
3021
3022         write_ptr_val = iwl_read_prph(trans, write_ptr);
3023         fw_mon_data->fw_mon_cycle_cnt =
3024                 cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3025         fw_mon_data->fw_mon_base_ptr =
3026                 cpu_to_le32(iwl_read_prph(trans, base));
3027         if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3028                 fw_mon_data->fw_mon_base_high_ptr =
3029                         cpu_to_le32(iwl_read_prph(trans, base_high));
3030                 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3031                 /* convert wrtPtr to DWs, to align with all HWs */
3032                 write_ptr_val >>= 2;
3033         }
3034         fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3035 }
3036
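/* Dump the firmware monitor: copy it directly when it lives in DRAM,
 * read it out of SMEM or via MARBH otherwise, per the dest TLV mode.
 * Returns the number of bytes added to the dump.
 */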
3037 static u32
3038 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3039                             struct iwl_fw_error_dump_data **data,
3040                             u32 monitor_len)
3041 {
3042         struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3043         u32 len = 0;
3044
3045         if (trans->dbg.dest_tlv ||
3046             (fw_mon->size &&
3047              (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3048               trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3049                 struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3050
3051                 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3052                 fw_mon_data = (void *)(*data)->data;
3053
3054                 iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3055
3056                 len += sizeof(**data) + sizeof(*fw_mon_data);
3057                 if (fw_mon->size) {
3058                         memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3059                         monitor_len = fw_mon->size;
3060                 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3061                         u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3062                         /*
3063                          * Update pointers to reflect actual values after
3064                          * shifting
3065                          */
3066                         if (trans->dbg.dest_tlv->version) {
3067                                 base = (iwl_read_prph(trans, base) &
3068                                         IWL_LDBG_M2S_BUF_BA_MSK) <<
3069                                        trans->dbg.dest_tlv->base_shift;
3070                                 base *= IWL_M2S_UNIT_SIZE;
3071                                 base += trans->cfg->smem_offset;
3072                         } else {
3073                                 base = iwl_read_prph(trans, base) <<
3074                                        trans->dbg.dest_tlv->base_shift;
3075                         }
3076
3077                         iwl_trans_read_mem(trans, base, fw_mon_data->data,
3078                                            monitor_len / sizeof(u32));
3079                 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3080                         monitor_len =
3081                                 iwl_trans_pci_dump_marbh_monitor(trans,
3082                                                                  fw_mon_data,
3083                                                                  monitor_len);
3084                 } else {
3085                         /* Didn't match anything - output no monitor data */
3086                         monitor_len = 0;
3087                 }
3088
3089                 len += monitor_len;
3090                 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3091         }
3092
3093         return len;
3094 }
3095
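/* Compute the monitor length for the dump and add the space it needs
 * (header included) to *len. Returns 0 when no monitor is configured.
 */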
3096 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3097 {
3098         if (trans->dbg.fw_mon.size) {
3099                 *len += sizeof(struct iwl_fw_error_dump_data) +
3100                         sizeof(struct iwl_fw_error_dump_fw_mon) +
3101                         trans->dbg.fw_mon.size;
3102                 return trans->dbg.fw_mon.size;
3103         } else if (trans->dbg.dest_tlv) {
3104                 u32 base, end, cfg_reg, monitor_len;
3105
3106                 if (trans->dbg.dest_tlv->version == 1) {
3107                         cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3108                         cfg_reg = iwl_read_prph(trans, cfg_reg);
3109                         base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3110                                 trans->dbg.dest_tlv->base_shift;
3111                         base *= IWL_M2S_UNIT_SIZE;
3112                         base += trans->cfg->smem_offset;
3113
3114                         monitor_len =
3115                                 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3116                                 trans->dbg.dest_tlv->end_shift;
3117                         monitor_len *= IWL_M2S_UNIT_SIZE;
3118                 } else {
3119                         base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3120                         end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3121
3122                         base = iwl_read_prph(trans, base) <<
3123                                trans->dbg.dest_tlv->base_shift;
3124                         end = iwl_read_prph(trans, end) <<
3125                               trans->dbg.dest_tlv->end_shift;
3126
3127                         /* Make "end" point to the actual end */
3128                         if (trans->trans_cfg->device_family >=
3129                             IWL_DEVICE_FAMILY_8000 ||
3130                             trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3131                                 end += (1 << trans->dbg.dest_tlv->end_shift);
3132                         monitor_len = end - base;
3133                 }
3134                 *len += sizeof(struct iwl_fw_error_dump_data) +
3135                         sizeof(struct iwl_fw_error_dump_fw_mon) +
3136                         monitor_len;
3137                 return monitor_len;
3138         }
3139         return 0;
3140 }
3141
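/* Build the transport error dump: size all requested sections first,
 * allocate one vzalloc'd buffer, then fill in host commands, CSR/FH
 * registers, RBs, paging blocks and the firmware monitor as requested
 * by @dump_mask.
 */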
3142 static struct iwl_trans_dump_data
3143 *iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3144                           u32 dump_mask)
3145 {
3146         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3147         struct iwl_fw_error_dump_data *data;
3148         struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3149         struct iwl_fw_error_dump_txcmd *txcmd;
3150         struct iwl_trans_dump_data *dump_data;
3151         u32 len, num_rbs = 0, monitor_len = 0;
3152         int i, ptr;
3153         bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3154                         !trans->trans_cfg->mq_rx_supported &&
3155                         dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3156
3157         if (!dump_mask)
3158                 return NULL;
3159
3160         /* transport dump header */
3161         len = sizeof(*dump_data);
3162
3163         /* host commands */
3164         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3165                 len += sizeof(*data) +
3166                         cmdq->n_window * (sizeof(*txcmd) +
3167                                           TFD_MAX_PAYLOAD_SIZE);
3168
3169         /* FW monitor */
3170         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3171                 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3172
3173         /* CSR registers */
3174         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3175                 len += sizeof(*data) + IWL_CSR_TO_DUMP;
3176
3177         /* FH registers */
3178         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3179                 if (trans->trans_cfg->gen2)
3180                         len += sizeof(*data) +
3181                                (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3182                                 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3183                 else
3184                         len += sizeof(*data) +
3185                                (FH_MEM_UPPER_BOUND -
3186                                 FH_MEM_LOWER_BOUND);
3187         }
3188
3189         if (dump_rbs) {
3190                 /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
3191                 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3192                 /* RBs */
3193                 num_rbs =
3194                         le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3195                         & 0x0FFF;
3196                 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3197                 len += num_rbs * (sizeof(*data) +
3198                                   sizeof(struct iwl_fw_error_dump_rb) +
3199                                   (PAGE_SIZE << trans_pcie->rx_page_order));
3200         }
3201
3202         /* Paged memory for gen2 HW */
3203         if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3204                 for (i = 0; i < trans->init_dram.paging_cnt; i++)
3205                         len += sizeof(*data) +
3206                                sizeof(struct iwl_fw_error_dump_paging) +
3207                                trans->init_dram.paging[i].size;
3208
3209         dump_data = vzalloc(len);
3210         if (!dump_data)
3211                 return NULL;
3212
3213         len = 0;
3214         data = (void *)dump_data->data;
3215
3216         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3217                 u16 tfd_size = trans->txqs.tfd.size;
3218
3219                 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3220                 txcmd = (void *)data->data;
3221                 spin_lock_bh(&cmdq->lock);
3222                 ptr = cmdq->write_ptr;
3223                 for (i = 0; i < cmdq->n_window; i++) {
3224                         u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3225                         u8 tfdidx;
3226                         u32 caplen, cmdlen;
3227
3228                         if (trans->trans_cfg->use_tfh)
3229                                 tfdidx = idx;
3230                         else
3231                                 tfdidx = ptr;
3232
3233                         cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3234                                                            (u8 *)cmdq->tfds +
3235                                                            tfd_size * tfdidx);
3236                         caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3237
3238                         if (cmdlen) {
3239                                 len += sizeof(*txcmd) + caplen;
3240                                 txcmd->cmdlen = cpu_to_le32(cmdlen);
3241                                 txcmd->caplen = cpu_to_le32(caplen);
3242                                 memcpy(txcmd->data, cmdq->entries[idx].cmd,
3243                                        caplen);
3244                                 txcmd = (void *)((u8 *)txcmd->data + caplen);
3245                         }
3246
3247                         ptr = iwl_txq_dec_wrap(trans, ptr);
3248                 }
3249                 spin_unlock_bh(&cmdq->lock);
3250
3251                 data->len = cpu_to_le32(len);
3252                 len += sizeof(*data);
3253                 data = iwl_fw_error_next_data(data);
3254         }
3255
3256         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3257                 len += iwl_trans_pcie_dump_csr(trans, &data);
3258         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3259                 len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3260         if (dump_rbs)
3261                 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3262
3263         /* Paged memory for gen2 HW */
3264         if (trans->trans_cfg->gen2 &&
3265             dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3266                 for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3267                         struct iwl_fw_error_dump_paging *paging;
3268                         u32 page_len = trans->init_dram.paging[i].size;
3269
3270                         data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3271                         data->len = cpu_to_le32(sizeof(*paging) + page_len);
3272                         paging = (void *)data->data;
3273                         paging->index = cpu_to_le32(i);
3274                         memcpy(paging->data,
3275                                trans->init_dram.paging[i].block, page_len);
3276                         data = iwl_fw_error_next_data(data);
3277
3278                         len += sizeof(*data) + sizeof(*paging) + page_len;
3279                 }
3280         }
3281         if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3282                 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3283
3284         dump_data->len = len;
3285
3286         return dump_data;
3287 }
3288
3289 #ifdef CONFIG_PM_SLEEP
3290 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
3291 {
3292         return 0;
3293 }
3294
3295 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
3296 {
3297 }
3298 #endif /* CONFIG_PM_SLEEP */
3299
3300 #define IWL_TRANS_COMMON_OPS                                            \
3301         .op_mode_leave = iwl_trans_pcie_op_mode_leave,                  \
3302         .write8 = iwl_trans_pcie_write8,                                \
3303         .write32 = iwl_trans_pcie_write32,                              \
3304         .read32 = iwl_trans_pcie_read32,                                \
3305         .read_prph = iwl_trans_pcie_read_prph,                          \
3306         .write_prph = iwl_trans_pcie_write_prph,                        \
3307         .read_mem = iwl_trans_pcie_read_mem,                            \
3308         .write_mem = iwl_trans_pcie_write_mem,                          \
3309         .read_config32 = iwl_trans_pcie_read_config32,                  \
3310         .configure = iwl_trans_pcie_configure,                          \
3311         .set_pmi = iwl_trans_pcie_set_pmi,                              \
3312         .sw_reset = iwl_trans_pcie_sw_reset,                            \
3313         .grab_nic_access = iwl_trans_pcie_grab_nic_access,              \
3314         .release_nic_access = iwl_trans_pcie_release_nic_access,        \
3315         .set_bits_mask = iwl_trans_pcie_set_bits_mask,                  \
3316         .dump_data = iwl_trans_pcie_dump_data,                          \
3317         .d3_suspend = iwl_trans_pcie_d3_suspend,                        \
3318         .d3_resume = iwl_trans_pcie_d3_resume,                          \
3319         .sync_nmi = iwl_trans_pcie_sync_nmi
3320
3321 #ifdef CONFIG_PM_SLEEP
3322 #define IWL_TRANS_PM_OPS                                                \
3323         .suspend = iwl_trans_pcie_suspend,                              \
3324         .resume = iwl_trans_pcie_resume,
3325 #else
3326 #define IWL_TRANS_PM_OPS
3327 #endif /* CONFIG_PM_SLEEP */
3328
3329 static const struct iwl_trans_ops trans_ops_pcie = {
3330         IWL_TRANS_COMMON_OPS,
3331         IWL_TRANS_PM_OPS
3332         .start_hw = iwl_trans_pcie_start_hw,
3333         .fw_alive = iwl_trans_pcie_fw_alive,
3334         .start_fw = iwl_trans_pcie_start_fw,
3335         .stop_device = iwl_trans_pcie_stop_device,
3336
3337         .send_cmd = iwl_trans_pcie_send_hcmd,
3338
3339         .tx = iwl_trans_pcie_tx,
3340         .reclaim = iwl_txq_reclaim,
3341
3342         .txq_disable = iwl_trans_pcie_txq_disable,
3343         .txq_enable = iwl_trans_pcie_txq_enable,
3344
3345         .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3346
3347         .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3348
3349         .freeze_txq_timer = iwl_trans_txq_freeze_timer,
3350         .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3351 #ifdef CONFIG_IWLWIFI_DEBUGFS
3352         .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3353 #endif
3354 };
3355
3356 static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3357         IWL_TRANS_COMMON_OPS,
3358         IWL_TRANS_PM_OPS
3359         .start_hw = iwl_trans_pcie_start_hw,
3360         .fw_alive = iwl_trans_pcie_gen2_fw_alive,
3361         .start_fw = iwl_trans_pcie_gen2_start_fw,
3362         .stop_device = iwl_trans_pcie_gen2_stop_device,
3363
3364         .send_cmd = iwl_trans_pcie_gen2_send_hcmd,
3365
3366         .tx = iwl_txq_gen2_tx,
3367         .reclaim = iwl_txq_reclaim,
3368
3369         .set_q_ptrs = iwl_txq_set_q_ptrs,
3370
3371         .txq_alloc = iwl_txq_dyn_alloc,
3372         .txq_free = iwl_txq_dyn_free,
3373         .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3374         .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3375         .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3376 #ifdef CONFIG_IWLWIFI_DEBUGFS
3377         .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3378 #endif
3379 };
3380
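/* Allocate and set up the PCIe transport: enable the PCI device, map
 * BAR0, configure DMA masks and interrupts (MSI-X or INTA+ICT), and
 * pick the gen2 ops when the trans config calls for them.
 */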
3381 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3382                                const struct pci_device_id *ent,
3383                                const struct iwl_cfg_trans_params *cfg_trans)
3384 {
3385         struct iwl_trans_pcie *trans_pcie;
3386         struct iwl_trans *trans;
3387         int ret, addr_size;
3388         const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3389
3390         if (!cfg_trans->gen2)
3391                 ops = &trans_ops_pcie;
3392
3393         ret = pcim_enable_device(pdev);
3394         if (ret)
3395                 return ERR_PTR(ret);
3396
3397         trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3398                                 cfg_trans);
3399         if (!trans)
3400                 return ERR_PTR(-ENOMEM);
3401
3402         trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3403
3404         trans_pcie->trans = trans;
3405         trans_pcie->opmode_down = true;
3406         spin_lock_init(&trans_pcie->irq_lock);
3407         spin_lock_init(&trans_pcie->reg_lock);
3408         spin_lock_init(&trans_pcie->alloc_page_lock);
3409         mutex_init(&trans_pcie->mutex);
3410         init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3411         init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3412
3413         trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3414                                                    WQ_HIGHPRI | WQ_UNBOUND, 1);
3415         if (!trans_pcie->rba.alloc_wq) {
3416                 ret = -ENOMEM;
3417                 goto out_free_trans;
3418         }
3419         INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3420
3421         trans_pcie->debug_rfkill = -1;
3422
3423         if (!cfg_trans->base_params->pcie_l1_allowed) {
3424                 /*
3425                  * W/A - seems to solve weird behavior. We need to remove this
3426                  * if we don't want to stay in L1 all the time. This wastes a
3427                  * lot of power.
3428                  */
3429                 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3430                                        PCIE_LINK_STATE_L1 |
3431                                        PCIE_LINK_STATE_CLKPM);
3432         }
3433
3434         trans_pcie->def_rx_queue = 0;
3435
3436         pci_set_master(pdev);
3437
3438         addr_size = trans->txqs.tfd.addr_size;
3439         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3440         if (!ret)
3441                 ret = pci_set_consistent_dma_mask(pdev,
3442                                                   DMA_BIT_MASK(addr_size));
3443         if (ret) {
3444                 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3445                 if (!ret)
3446                         ret = pci_set_consistent_dma_mask(pdev,
3447                                                           DMA_BIT_MASK(32));
3448                 /* both attempts failed: */
3449                 if (ret) {
3450                         dev_err(&pdev->dev, "No suitable DMA available\n");
3451                         goto out_no_pci;
3452                 }
3453         }
3454
3455         ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3456         if (ret) {
3457                 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3458                 goto out_no_pci;
3459         }
3460
3461         trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3462         if (!trans_pcie->hw_base) {
3463                 dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3464                 ret = -ENODEV;
3465                 goto out_no_pci;
3466         }
3467
3468         /* We disable the RETRY_TIMEOUT register (0x41) to keep
3469          * PCI Tx retries from interfering with C3 CPU state */
3470         pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3471
3472         trans_pcie->pci_dev = pdev;
3473         iwl_disable_interrupts(trans);
3474
3475         trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3476         if (trans->hw_rev == 0xffffffff) {
3477                 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3478                 ret = -EIO;
3479                 goto out_no_pci;
3480         }
3481
3482         /*
3483          * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
3484          * changed, and now the revision step also includes bits 0-1 (no more
3485          * "dash" value). To keep hw_rev backwards compatible - we'll store it
3486          * in the old format.
3487          */
3488         if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3489                 trans->hw_rev = (trans->hw_rev & 0xfff0) |
3490                                 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3491
3492         IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3493
3494         iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3495         trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3496         snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3497                  "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3498
3499         /* Initialize the wait queue for commands */
3500         init_waitqueue_head(&trans_pcie->wait_command_queue);
3501
3502         init_waitqueue_head(&trans_pcie->sx_waitq);
3503
3505         if (trans_pcie->msix_enabled) {
3506                 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3507                 if (ret)
3508                         goto out_no_pci;
3509         } else {
3510                 ret = iwl_pcie_alloc_ict(trans);
3511                 if (ret)
3512                         goto out_no_pci;
3513
3514                 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3515                                                 iwl_pcie_isr,
3516                                                 iwl_pcie_irq_handler,
3517                                                 IRQF_SHARED, DRV_NAME, trans);
3518                 if (ret) {
3519                         IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3520                         goto out_free_ict;
3521                 }
3522         }
3523
3524 #ifdef CONFIG_IWLWIFI_DEBUGFS
3525         trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3526         mutex_init(&trans_pcie->fw_mon_data.mutex);
3527 #endif
3528
3529         iwl_dbg_tlv_init(trans);
3530
3531         return trans;
3532
3533 out_free_ict:
3534         iwl_pcie_free_ict(trans);
3535 out_no_pci:
3536         destroy_workqueue(trans_pcie->rba.alloc_wq);
3537 out_free_trans:
3538         iwl_trans_free(trans);
3539         return ERR_PTR(ret);
3540 }
3541
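/* Fire an NMI at the firmware and poll (up to IWL_TRANS_NMI_TIMEOUT)
 * for the resulting SW error interrupt, keeping interrupts masked for
 * the duration so the poll can observe and clear the cause bit itself.
 */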
3542 void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3543 {
3544         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3545         unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
3546         bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
3547         u32 inta_addr, sw_err_bit;
3548
3549         if (trans_pcie->msix_enabled) {
3550                 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3551                 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3552         } else {
3553                 inta_addr = CSR_INT;
3554                 sw_err_bit = CSR_INT_BIT_SW_ERR;
3555         }
3556
3557         /* if the interrupts were already disabled, there is no point in
3558          * calling iwl_disable_interrupts
3559          */
3560         if (interrupts_enabled)
3561                 iwl_disable_interrupts(trans);
3562
3563         iwl_force_nmi(trans);
3564         while (time_after(timeout, jiffies)) {
3565                 u32 inta_hw = iwl_read32(trans, inta_addr);
3566
3567                 /* Error detected by uCode */
3568                 if (inta_hw & sw_err_bit) {
3569                         /* Clear causes register */
3570                         iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
3571                         break;
3572                 }
3573
3574                 mdelay(1);
3575         }
3576
3577         /* enable interrupts only if they were already enabled before this
3578          * function, to avoid a case where the driver enables interrupts before
3579          * proper configurations were made
3580          */
3581         if (interrupts_enabled)
3582                 iwl_enable_interrupts(trans);
3583
3584         iwl_trans_fw_error(trans);
3585 }