// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
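
/* Usage sketch (illustrative, not part of the original source): both knobs
 * can be given at module load time, e.g.
 *   modprobe ath10k_pci irq_mode=2 reset_mode=1
 * to force MSI interrupts and warm-only resets; because they are registered
 * with mode 0644 they are also visible under
 * /sys/module/ath10k_pci/parameters/.
 */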
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
#define QCA99X0_PCIE_BAR0_START_REG    0x81030
#define QCA99X0_CPU_MEM_ADDR_REG       0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG       0x4d010
static const struct pci_device_id ath10k_pci_id_table[] = {
	/* PCI-E QCA988X V2 (Ubiquiti branded) */
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

	{ QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },

	{ QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },

	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
	{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },

	{ QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_htc_rx_cb,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_htc_rx_cb,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.send_cb = ath10k_pci_htc_tx_cb,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.send_cb = ath10k_pci_htt_tx_cb,
	},

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
		.recv_cb = ath10k_pci_htt_rx_cb,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},
};
/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},
	/* It is not necessary to send target wlan configuration for CE10
	 * & CE11 as these CEs are not actively used in target.
	 */
};
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
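
/* SoC power-save helpers: the functions below toggle the PCIE_SOC_WAKE
 * register and poll RTC_STATE until the target reports it is fully awake.
 * MMIO access is only safe while the device is awake.
 */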
static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}
static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;
	}

	return -ETIMEDOUT;
}
static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
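
/* ath10k_pci_wake()/ath10k_pci_sleep() are refcounted: the device is woken on
 * the first wake reference, and it is only allowed to doze again once the
 * last reference has been dropped and the grace-period timer below expires.
 */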
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_ps_timer(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
	struct ath10k *ar = ar_pci->ar;
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
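
/* Bus accessors: every MMIO access is bounds-checked against the mapped BAR
 * length and bracketed by a wake/sleep reference, so the target is guaranteed
 * to be awake while the register is touched.
 */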
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}
static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * triggered again.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}
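
/* RX buffers are posted to the copy engines one skb at a time; if an
 * allocation or DMA mapping fails, the rx_post_retry timer re-attempts the
 * post later.
 */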
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ce->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ce->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ce->ce_lock);

	while (num > 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}
void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(struct timer_list *t)
{
	struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
	struct ath10k *ar = ar_pci->ar;

	ath10k_pci_rx_post(ar);
}
static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
				 & 0x7ff) << 21;
	val |= 0x100000 | region;
	return val;
}
/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
 * Support to access target space below 1M for qca6174 and qca9377.
 * If target space is below 1M, the bit[20] of converted CE addr is 0.
 * Otherwise bit[20] of converted CE addr is 1.
 */
static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
				 & 0x7ff) << 21;
	val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
	return val;
}
static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -ENOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	mutex_lock(&ar_pci->ce_diag_mutex);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from Target CPU virtual address space
	 * to CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
						     &completed_nbytes) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	mutex_unlock(&ar_pci->ce_diag_mutex);

	return ret;
}
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	dma_addr_t ce_data_base = 0;
	int i;

	mutex_lock(&ar_pci->ce_diag_mutex);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       alloc_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = nbytes;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Copy caller's data to allocated DMA buf */
		memcpy(data_buf, data, nbytes);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
						     &completed_nbytes) != 0) {
			udelay(DIAG_ACCESS_CE_WAIT_US);
			i += DIAG_ACCESS_CE_WAIT_US;

			if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	mutex_unlock(&ar_pci->ce_diag_mutex);

	return ret;
}
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}
static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/* let device gain the buffer again */
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
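
/* Scatter-gather send on the HIF layer: all but the last fragment are queued
 * with CE_SEND_FLAG_GATHER, and the final send without the gather flag closes
 * the batch; on failure the already-posted descriptors are reverted before
 * the CE lock is released.
 */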
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ce->ce_lock);
	return 0;

err:
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ce->ce_lock);
	return err;
}
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
					  const struct ath10k_mem_region *mem_region,
					  u8 *buf, size_t buf_len)
{
	const struct ath10k_mem_section *cur_section, *next_section;
	unsigned int count, section_size, skip_size;
	int ret, i, j;

	if (!mem_region || !buf)
		return 0;

	cur_section = &mem_region->section_table.sections[0];

	if (mem_region->start > cur_section->start) {
		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
			    mem_region->start, cur_section->start);
		return 0;
	}

	skip_size = cur_section->start - mem_region->start;

	/* fill the gap between the first register section and register
	 * start address
	 */
	for (i = 0; i < skip_size; i++) {
		*buf = ATH10K_MAGIC_NOT_COPIED;
		buf++;
	}

	count = 0;

	for (i = 0; cur_section != NULL; i++) {
		section_size = cur_section->end - cur_section->start;

		if (section_size <= 0) {
			ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
				    cur_section->start,
				    cur_section->end);
			break;
		}

		if ((i + 1) == mem_region->section_table.size) {
			/* last section */
			next_section = NULL;
			skip_size = 0;
		} else {
			next_section = cur_section + 1;

			if (cur_section->end > next_section->start) {
				ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
					    next_section->start,
					    cur_section->end);
				break;
			}

			skip_size = next_section->start - cur_section->end;
		}

		if (buf_len < (skip_size + section_size)) {
			ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
			break;
		}

		buf_len -= skip_size + section_size;

		/* read section to dest memory */
		ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
					       buf, section_size);
		if (ret) {
			ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
				    cur_section->start, ret);
			break;
		}

		buf += section_size;
		count += section_size;

		/* fill in the gap between this section and the next */
		for (j = 0; j < skip_size; j++) {
			*buf = ATH10K_MAGIC_NOT_COPIED;
			buf++;
			count++;
		}

		if (!next_section)
			/* this was the last section */
			break;

		cur_section = next_section;
	}

	return count;
}
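
/* The target exposes either DRAM or IRAM through the same window; the dump
 * code below flips FW_RAM_CONFIG_ADDRESS and reads the value back to confirm
 * that the switch actually took effect.
 */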
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
	u32 val;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   FW_RAM_CONFIG_ADDRESS, config);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				FW_RAM_CONFIG_ADDRESS);
	if (val != config) {
		ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
			    val, config);
		return -EIO;
	}

	return 0;
}
/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
				       const struct ath10k_mem_region *region,
				       u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 base_addr, i;

	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
	base_addr += region->start;

	for (i = 0; i < region->len; i += 4) {
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
	}

	return region->len;
}
/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;

	for (i = 0; i < region->len; i += 4)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

	return region->len;
}
/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}
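
/* Walks the per-chip memory layout, switching the RAM config for IRAM
 * regions, and emits one ath10k_dump_ram_data_hdr per region into the
 * ramdump buffer.
 */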
static void ath10k_pci_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count, shift;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	lockdep_assert_held(&ar->data_lock);

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* To get IRAM dump, the host driver needs to switch target
		 * ram config from DRAM to IRAM.
		 */
		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
			shift = current_region->start >> 20;

			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
				break;
			}
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		switch (current_region->type) {
		case ATH10K_MEM_REGION_TYPE_IOSRAM:
			count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
			break;
		case ATH10K_MEM_REGION_TYPE_IOREG:
			count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
			break;
		default:
			ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
			if (ret < 0)
				break;

			count = ret;
			break;
		}

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length. */
			break;

		buf += count;
		buf_len -= count;

		current_region++;
	}
}
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char guid[UUID_STRING_LEN + 1];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_coredump_new(ar);

	if (crash_data)
		scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
	else
		scnprintf(guid, sizeof(guid), "n/a");

	ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);
	ath10k_ce_dump_registers(ar, crash_data);
	ath10k_pci_dump_memory(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}
void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	int resources;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	del_timer_sync(&ar_pci->rx_post_retry);
}
int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (!ul_set || !dl_set)
		return -ENOENT;

	return 0;
}
void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to mask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to unmask irq/MSI.
		 */
		break;
	case ATH10K_HW_WCN3990:
		break;
	}
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	napi_enable(&ar->napi);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}
void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
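
/* BMI (Boot loader Messaging Interface) exchange: the request is bounced
 * through DMA-coherent copies, sent on the TX copy engine and, if a response
 * is expected, received on the RX copy engine while ath10k_pci_bmi_wait()
 * polls both pipes for completion.
 */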
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, *resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}
static int ath10k_pci_bmi_wait(struct ath10k *ar,
			       struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
	unsigned long started = jiffies;
	unsigned long dur;
	int ret;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
			ret = 0;
			goto out;
		}

		schedule();
	}

	ret = -ETIMEDOUT;

out:
	dur = jiffies - started;
	if (dur > HZ)
		ath10k_dbg(ar, ATH10K_DBG_BMI,
			   "bmi cmd took %lu jiffies hz %d ret %d\n",
			   dur, HZ, ret);
	return ret;
}
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}
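
/* The number of IRAM banks reserved for early allocation depends on the exact
 * chip and, for QCA6164/QCA6174, on the chip revision encoded in the chip_id.
 */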
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 9;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->get_num_banks(ar);
}
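
/* Pushes the host/target CE layout to the firmware: the pipe configuration,
 * the service-to-pipe map, the PCIe config flags and the early-alloc settings
 * are all written through the diagnostic window before the target proceeds
 * with initialization.
 */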
int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
2435 struct ce_attr *attr;
2436 struct ce_pipe_config *config;
2438 /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2439 * since it is currently used for other feature.
2442 /* Override Host's Copy Engine 5 configuration */
2443 attr = &host_ce_config_wlan[5];
2444 attr->src_sz_max = 0;
2445 attr->dest_nentries = 0;
2447 /* Override Target firmware's Copy Engine configuration */
2448 config = &target_ce_config_wlan[5];
2449 config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2450 config->nbytes_max = __cpu_to_le32(2048);
2452 /* Map from service/endpoint to Copy Engine */
2453 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ce->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}
int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}
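/* Reading all-ones from a memory-mapped PCI register is the classic
 * signature of a completion from a device that has dropped off the bus
 * (master abort), which is why 0xffffffff is treated as "device gone"
 * rather than as a plausible register value.
 */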
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
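/* The double reset-and-init sequence above appears deliberate: the first
 * si0/cpu reset quiesces the target so the subsequent CE reset cannot race
 * with target-side copy engine accesses (the hang described in the comment),
 * and the second cpu reset plus pipe re-init then brings the device back
 * into a clean boot state.
 */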
static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -ENOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar,
				   enum ath10k_firmware_mode fw_mode)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver suspend. */
	return 0;
}

static int ath10k_pci_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	/* Nothing to do; the important stuff is in the driver resume. */
	return 0;
}
static int ath10k_pci_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}
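/* The config-space dword at offset 0x40 covers bytes 0x40-0x43, so the
 * RETRY_TIMEOUT byte at 0x41 sits in bits 15:8 -- hence the 0x0000ff00 test
 * and the 0xffff00ff mask above, which clear just that byte.
 */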
static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}
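/* The rule implemented above: the XOR of all 16-bit words of a valid image,
 * including its stored checksum word, must equal 0xffff. A tool producing
 * such an image would do the inverse (illustrative sketch only; the checksum
 * slot position is hypothetical):
 *
 *	u16 csum = 0xffff;
 *	for (i = 0; i < n_words; i++)
 *		if (i != csum_slot)
 *			csum ^= words[i];
 *	words[csum_slot] = csum;
 */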
static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}
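/* The SI_CONFIG_DIVIDER value of 8 is assumed to select the /512 clock
 * ratio mentioned in the comment above (110 MHz / 512 is roughly 215 kHz);
 * the divider encoding itself is defined by the Swift silicon, not by this
 * driver.
 */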
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* select the device and set up the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}
static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
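/* In legacy (INTx) mode the interrupt line may be shared with other devices,
 * so the handler above confirms via ath10k_pci_irq_pending() that this
 * device actually asserted it before claiming the interrupt; an MSI vector
 * is exclusive to the device, so no such check is needed there.
 */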
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);
		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is in progress. Pending interrupts that
		 * arrive after all copy engine pipes have been serviced by
		 * this poll would otherwise never be handled again, which has
		 * been seen to stall the boot sequence on x86 platforms. So
		 * before re-enabling interrupts it is safer to check for and
		 * service any pending interrupts immediately.
		 */
		if (ath10k_ce_interrupt_summary(ar)) {
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}
void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
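/* Order of preference above: MSI first, then legacy INTx as the fallback.
 * The CORE_BASE interrupt-enable write is repeated later from
 * ath10k_pci_wait_for_target_init() precisely because of the race the
 * comment describes.
 */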
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}
int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");

	return 0;
}
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_cold_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put the device into reset
	 * and pulling it out of reset, PCIe may not be stable: an immediate
	 * PCIe register access can cause a bus error. Delay before any PCIe
	 * access request to avoid this.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
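/* Bit 0 of SOC_GLOBAL_RESET_ADDRESS is taken here to be the global reset
 * control (the val |= 1 / val &= ~1 pair above); the 20 ms settle time on
 * either side of the transition is what keeps the first post-reset PCIe
 * access from faulting, per the comment in the function.
 */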
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);

	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}
static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}
int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_init(&ce->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);
	mutex_init(&ar_pci->ce_diag_mutex);

	timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	struct ath10k_bus_params bus_params;
	bool pci_ps;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID_UBNT:
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
	ar->ce_priv = &ar_pci->ce;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	bus_params.dev_type = ATH10K_DEV_TYPE_LL;
	bus_params.link_can_suspend = true;
	bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (bus_params.chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, bus_params.chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, &bus_params);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_suspend(ar);
	if (ret)
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);

	return ret;
}

static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
{
	struct ath10k *ar = dev_get_drvdata(dev);
	int ret;

	ret = ath10k_pci_resume(ar);
	if (ret)
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
			 ath10k_pci_pm_suspend,
			 ath10k_pci_pm_resume);
static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
#ifdef CONFIG_PM
	.driver.pm = &ath10k_pci_pm_ops,
#endif
};
static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	ret = ath10k_ahb_init();
	if (ret)
		printk(KERN_ERR "ahb init failed: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);