1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
4 */
6 #include <linux/module.h>
/* BAR index and DMA addressing width used by this driver. */
16 #define ATH11K_PCI_BAR_NUM 0
17 #define ATH11K_PCI_DMA_MASK 32
/* First MSI vector used for CE interrupts (vectors 0-2 belong to MHI,
 * per msi_config below).
 */
19 #define ATH11K_PCI_IRQ_CE0_OFFSET 3
/* Register-window machinery: offsets below WINDOW_START are directly
 * reachable in BAR0; larger offsets are accessed through a sliding
 * window selected by bits 19-24 of the offset (WINDOW_VALUE_MASK) and
 * indexed by bits 0-18 (WINDOW_RANGE_MASK).
 */
21 #define WINDOW_ENABLE_BIT 0x40000000
22 #define WINDOW_REG_ADDRESS 0x310c
23 #define WINDOW_VALUE_MASK GENMASK(24, 19)
24 #define WINDOW_START 0x80000
25 #define WINDOW_RANGE_MASK GENMASK(18, 0)
/* SoC hardware-version register, read in probe to pick the hw_rev. */
27 #define TCSR_SOC_HW_VERSION 0x0224
28 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
29 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
31 /* BAR0 + 4k is always accessible, and no
32 * need to force wakeup.
 */
35 #define ACCESS_ALWAYS_OFF 0xFE0
37 #define QCA6390_DEVICE_ID 0x1101
/* PCI IDs this driver binds to — currently only Qualcomm QCA6390. */
39 static const struct pci_device_id ath11k_pci_id_table[] = {
40 { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
44 MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
/* Bus-specific parameters handed to the core: PCI devices need M3
 * firmware support and use dynamic (non-fixed) BDF/memory addressing.
 */
46 static const struct ath11k_bus_params ath11k_pci_bus_params = {
48 .m3_fw_support = true,
49 .fixed_bdf_addr = false,
50 .fixed_mem_region = false,
/* Static MSI vector partitioning: MHI gets 0-2, CE 3-12, WAKE 13,
 * DP 14-31 (32 vectors total, allocated in ath11k_pci_enable_msi()).
 */
53 static const struct ath11k_msi_config msi_config = {
56 .users = (struct ath11k_msi_user[]) {
57 { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
58 { .name = "CE", .num_vectors = 10, .base_vector = 3 },
59 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
60 { .name = "DP", .num_vectors = 18, .base_vector = 14 },
/* Human-readable names indexed by logical IRQ number, used when
 * requesting CE interrupts in ath11k_pci_config_irq().
 * NOTE(review): this listing appears to omit the leading entries
 * (original lines 65-80) — verify against the full source.
 */
64 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
81 "host2reo-re-injection",
83 "host2rxdma-monitor-ring3",
84 "host2rxdma-monitor-ring2",
85 "host2rxdma-monitor-ring1",
87 "wbm2host-rx-release",
89 "reo2host-destination-ring4",
90 "reo2host-destination-ring3",
91 "reo2host-destination-ring2",
92 "reo2host-destination-ring1",
93 "rxdma2host-monitor-destination-mac3",
94 "rxdma2host-monitor-destination-mac2",
95 "rxdma2host-monitor-destination-mac1",
96 "ppdu-end-interrupts-mac3",
97 "ppdu-end-interrupts-mac2",
98 "ppdu-end-interrupts-mac1",
99 "rxdma2host-monitor-status-ring-mac3",
100 "rxdma2host-monitor-status-ring-mac2",
101 "rxdma2host-monitor-status-ring-mac1",
102 "host2rxdma-host-buf-ring-mac3",
103 "host2rxdma-host-buf-ring-mac2",
104 "host2rxdma-host-buf-ring-mac1",
105 "rxdma2host-destination-ring-mac3",
106 "rxdma2host-destination-ring-mac2",
107 "rxdma2host-destination-ring-mac1",
108 "host2tcl-input-ring4",
109 "host2tcl-input-ring3",
110 "host2tcl-input-ring2",
111 "host2tcl-input-ring1",
112 "wbm2host-tx-completions-ring3",
113 "wbm2host-tx-completions-ring2",
114 "wbm2host-tx-completions-ring1",
115 "tcl2host-status-ring",
/* Program the BAR window register so that 'offset' is reachable via the
 * window at WINDOW_START; the last-selected window is cached in
 * ab_pci->register_window to skip redundant register writes.
 * Caller must hold ab_pci->window_lock.
 */
118 static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
120 struct ath11k_base *ab = ab_pci->ab;
122 u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
124 lockdep_assert_held(&ab_pci->window_lock);
126 if (window != ab_pci->register_window) {
127 iowrite32(WINDOW_ENABLE_BIT | window,
128 ab->mem + WINDOW_REG_ADDRESS);
129 ab_pci->register_window = window;
/* Write a 32-bit target register. Offsets past the always-on first 4K
 * of BAR0 require an MHI wakeup vote (taken only once INIT_DONE is
 * set); offsets at or above WINDOW_START go through the register
 * window under window_lock.
 */
133 void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
135 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
137 /* for offset beyond BAR + 4K - 32, may
138 * need to wakeup MHI to access.
 */
140 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
141 offset >= ACCESS_ALWAYS_OFF)
142 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
144 if (offset < WINDOW_START) {
145 iowrite32(value, ab->mem + offset);
/* else: windowed access, serialized by window_lock */
147 spin_lock_bh(&ab_pci->window_lock);
148 ath11k_pci_select_window(ab_pci, offset);
149 iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
150 spin_unlock_bh(&ab_pci->window_lock);
/* Drop the MHI runtime vote taken above. */
153 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
154 offset >= ACCESS_ALWAYS_OFF)
155 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
/* Read a 32-bit target register; mirror image of ath11k_pci_write32()
 * (same MHI wakeup rule and windowed path for high offsets).
 */
158 u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
160 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
163 /* for offset beyond BAR + 4K - 32, may
164 * need to wakeup MHI to access.
 */
166 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
167 offset >= ACCESS_ALWAYS_OFF)
168 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
170 if (offset < WINDOW_START) {
171 val = ioread32(ab->mem + offset);
/* else: windowed access, serialized by window_lock */
173 spin_lock_bh(&ab_pci->window_lock);
174 ath11k_pci_select_window(ab_pci, offset);
175 val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
176 spin_unlock_bh(&ab_pci->window_lock);
179 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
180 offset >= ACCESS_ALWAYS_OFF)
181 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
/* Pulse the SoC global-reset bit: set PCIE_SOC_GLOBAL_RESET_V, wait,
 * then clear it again (otherwise the chip stays stuck in reset).
 * A read-back of all-ones afterwards means the PCIe link went down.
 */
186 static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
190 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
192 val |= PCIE_SOC_GLOBAL_RESET_V;
194 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
196 /* TODO: exact time to sleep is uncertain */
200 /* Need to toggle V bit back otherwise stuck in reset status */
201 val &= ~PCIE_SOC_GLOBAL_RESET_V;
203 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
207 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
208 if (val == 0xffffffff)
209 ath11k_warn(ab, "link down error during global reset\n");
/* Clear warm-boot/debug state so Q6 takes the cold-boot path after a
 * reset: zero WLAON_WARM_SW_ENTRY and read-clear the reset-cause
 * register.
 */
212 static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
217 val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
218 ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
220 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
221 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
223 /* TODO: exact time to sleep is uncertain */
226 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
227 * continuing warm path and entering dead loop.
 */
229 ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
232 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
233 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
235 /* A read clear register. clear the register to prevent
236 * Q6 from entering wrong code path.
 */
238 val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
239 ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
/* Re-enable link training after a hot reset: retry writing
 * PARM_LTSSM_VALUE to the PARF LTSSM register (up to 5 attempts, or
 * bail out early on an all-ones read meaning link-down), then assert
 * the GCC PCIe hot-reset bit.
 * NOTE(review): the extra 0x10 OR'd into GCC_GCC_PCIE_HOT_RST below is
 * an undocumented magic bit — confirm against the register spec.
 */
242 static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
247 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
249 /* PCIE link seems very unstable after the Hot Reset*/
250 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
251 if (val == 0xffffffff)
254 ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
255 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
258 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
260 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
261 val |= GCC_GCC_PCIE_HOT_RST_VAL | 0x10;
262 ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
263 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
265 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
/* Clear all pending target interrupts left over from a hot reset so
 * SBL does not crash on a stale interrupt when it re-enables them.
 */
270 static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
272 /* This is a WAR for PCIE Hotreset.
273 * When target receive Hotreset, but will set the interrupt.
274 * So when download SBL again, SBL will open Interrupt and
275 * receive it, and crash immediately.
 */
277 ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
/* Force the SoC awake via the local PCIe wake register (used before
 * stopping MHI in the power-down path).
 */
280 static void ath11k_pci_force_wake(struct ath11k_base *ab)
282 ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
/* Full software reset sequence: LTSSM/interrupt-clear prep, MHI vector
 * clear, SoC global reset, MHICTRL reset, then debug-register cleanup.
 * NOTE(review): the 'power_on' guard around the first two calls is not
 * visible in this listing (original line 288 dropped) — presumably
 * enable_ltssm/clear_all_intrs run only when power_on is true; verify
 * against the full source.
 */
286 static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
289 ath11k_pci_enable_ltssm(ab);
290 ath11k_pci_clear_all_intrs(ab);
293 ath11k_mhi_clear_vector(ab);
294 ath11k_pci_soc_global_reset(ab);
295 ath11k_mhi_set_mhictrl_reset(ab);
296 ath11k_pci_clear_dbg_registers(ab);
/* Map an MSI vector index to its Linux IRQ number. */
299 int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
301 struct pci_dev *pci_dev = to_pci_dev(dev);
303 return pci_irq_vector(pci_dev, vector);
/* Read the MSI target address from PCI config space; the high dword is
 * read only when the device advertised 64-bit MSI (IS_MSI_64 flag set
 * in ath11k_pci_enable_msi()).
 */
306 static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
309 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
310 struct pci_dev *pci_dev = to_pci_dev(ab->dev);
312 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
315 if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
316 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
/* Look up 'user_name' ("MHI"/"CE"/"WAKE"/"DP") in msi_config and
 * return its vector count, MSI-data base (hardware base + endpoint
 * base data) and vector base. Errors with -EINVAL-style failure when
 * the name is unknown (return statements dropped from this listing).
 */
323 int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
324 int *num_vectors, u32 *user_base_data,
327 struct ath11k_base *ab = ab_pci->ab;
330 for (idx = 0; idx < msi_config.total_users; idx++) {
331 if (strcmp(user_name, msi_config.users[idx].name) == 0) {
332 *num_vectors = msi_config.users[idx].num_vectors;
333 *user_base_data = msi_config.users[idx].base_vector
334 + ab_pci->msi_ep_base_data;
335 *base_vector = msi_config.users[idx].base_vector;
337 ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
338 user_name, *num_vectors, *user_base_data,
345 ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
/* hif_ops wrapper: adapt the ath11k_base interface to the PCI-private
 * ath11k_pci_get_user_msi_assignment().
 */
350 static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
351 int *num_vectors, u32 *user_base_data,
354 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
356 return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
357 num_vectors, user_base_data,
/* Free every DP (ext) group IRQ and delete its NAPI context. */
361 static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
365 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
366 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
368 for (j = 0; j < irq_grp->num_irq; j++)
369 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
371 netif_napi_del(&irq_grp->napi);
/* Free all CE interrupts (skipping pipes with interrupts disabled),
 * then the DP ext interrupts.
 */
375 static void ath11k_pci_free_irq(struct ath11k_base *ab)
379 for (i = 0; i < ab->hw_params.ce_count; i++) {
380 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
382 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
383 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
386 ath11k_pci_free_ext_irq(ab);
/* Enable the host-side IRQ line for one CE pipe. */
389 static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
393 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
394 enable_irq(ab->irq_num[irq_idx]);
/* Disable one CE pipe's IRQ without waiting for in-flight handlers
 * (nosync — safe to call from the hard-irq handler itself).
 */
397 static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
401 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
402 disable_irq_nosync(ab->irq_num[irq_idx]);
/* Disable IRQs for every CE pipe that has interrupts enabled. */
405 static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
409 for (i = 0; i < ab->hw_params.ce_count; i++) {
410 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
412 ath11k_pci_ce_irq_disable(ab, i);
/* Wait for any in-flight CE interrupt handlers to finish; used after
 * disabling the IRQs in the stop path.
 */
416 static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
421 for (i = 0; i < ab->hw_params.ce_count; i++) {
422 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
425 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
426 synchronize_irq(ab->irq_num[irq_idx]);
/* CE bottom half: service the pipe's copy engine, then re-enable the
 * IRQ that the hard-irq handler disabled before scheduling us.
 */
430 static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
432 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
434 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
436 ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
/* CE hard-irq handler: mask the line and defer the real work to the
 * per-pipe tasklet (which re-enables the line when done).
 */
439 static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
441 struct ath11k_ce_pipe *ce_pipe = arg;
443 ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
444 tasklet_schedule(&ce_pipe->intr_tq);
/* Mask every IRQ belonging to one DP (ext) interrupt group. */
449 static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
453 for (i = 0; i < irq_grp->num_irq; i++)
454 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
/* Disable all ext groups: mask the IRQs, then quiesce and disable each
 * group's NAPI instance.
 */
457 static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
461 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
462 struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
464 ath11k_pci_ext_grp_disable(irq_grp);
466 napi_synchronize(&irq_grp->napi);
467 napi_disable(&irq_grp->napi);
/* Unmask every IRQ belonging to one DP (ext) interrupt group. */
471 static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
475 for (i = 0; i < irq_grp->num_irq; i++)
476 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
/* Enable all ext groups: NAPI first, then unmask the group IRQs. */
479 static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
483 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
484 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
486 napi_enable(&irq_grp->napi);
487 ath11k_pci_ext_grp_enable(irq_grp);
/* Wait for in-flight handlers on every ext-group IRQ line. */
491 static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
495 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
496 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
498 for (j = 0; j < irq_grp->num_irq; j++) {
499 irq_idx = irq_grp->irqs[j];
500 synchronize_irq(ab->irq_num[irq_idx]);
/* hif_ops irq_disable: mask all ext IRQs and wait until no handler is
 * still running.
 */
505 static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
507 __ath11k_pci_ext_irq_disable(ab);
508 ath11k_pci_sync_ext_irqs(ab);
/* NAPI poll for a DP group: service SRNGs up to 'budget'; when work is
 * exhausted, complete NAPI and re-enable the group IRQs. The
 * work_done > budget clamp (body dropped from this listing) caps the
 * value returned to the NAPI core.
 */
511 static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
513 struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
514 struct ath11k_ext_irq_grp,
516 struct ath11k_base *ab = irq_grp->ab;
519 work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
520 if (work_done < budget) {
521 napi_complete_done(napi, work_done);
522 ath11k_pci_ext_grp_enable(irq_grp);
525 if (work_done > budget)
/* Ext hard-irq handler: mask the group's lines and hand off to NAPI
 * (the poll routine re-enables them when the budget isn't exhausted).
 */
531 static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
533 struct ath11k_ext_irq_grp *irq_grp = arg;
535 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
537 ath11k_pci_ext_grp_disable(irq_grp);
539 napi_schedule(&irq_grp->napi);
/* Set up the DP (ext) interrupt groups: fetch the "DP" MSI assignment,
 * create one NAPI context per group, and request one IRQ per group
 * (vector chosen round-robin over the DP vectors). Each IRQ is left
 * disabled; ath11k_pci_ext_irq_enable() turns them on later.
 * NOTE(review): the num_irq computation driven by the ring-mask tests
 * is only partially visible in this listing (lines dropped) — verify
 * against the full source before modifying.
 */
544 static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
546 int i, j, ret, num_vectors = 0;
547 u32 user_base_data = 0, base_vector = 0;
549 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
556 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
557 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
562 init_dummy_netdev(&irq_grp->napi_ndev);
563 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
564 ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
/* Group i gets an IRQ only if some ring mask routes work to it. */
566 if (ab->hw_params.ring_mask->tx[i] ||
567 ab->hw_params.ring_mask->rx[i] ||
568 ab->hw_params.ring_mask->rx_err[i] ||
569 ab->hw_params.ring_mask->rx_wbm_rel[i] ||
570 ab->hw_params.ring_mask->reo_status[i] ||
571 ab->hw_params.ring_mask->rxdma2host[i] ||
572 ab->hw_params.ring_mask->host2rxdma[i] ||
573 ab->hw_params.ring_mask->rx_mon_status[i]) {
577 irq_grp->num_irq = num_irq;
578 irq_grp->irqs[0] = base_vector + i;
580 for (j = 0; j < irq_grp->num_irq; j++) {
581 int irq_idx = irq_grp->irqs[j];
582 int vector = (i % num_vectors) + base_vector;
583 int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
585 ab->irq_num[irq_idx] = irq;
587 ath11k_dbg(ab, ATH11K_DBG_PCI,
588 "irq:%d group:%d\n", irq, i);
589 ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
591 "DP_EXT_IRQ", irq_grp);
593 ath11k_err(ab, "failed request irq %d: %d\n",
/* Leave masked until ext_irq_enable() runs. */
598 disable_irq_nosync(ab->irq_num[irq_idx]);
/* Request all interrupts: one IRQ per CE pipe (vector chosen
 * round-robin over the "CE" MSI vectors, tasklet as bottom half, left
 * disabled until ath11k_pci_ce_irqs_enable()), then the DP ext groups
 * via ath11k_pci_ext_irq_config().
 */
605 static int ath11k_pci_config_irq(struct ath11k_base *ab)
607 struct ath11k_ce_pipe *ce_pipe;
611 unsigned int msi_data;
612 int irq, i, ret, irq_idx;
614 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
615 "CE", &msi_data_count,
616 &msi_data_start, &msi_irq_start);
620 /* Configure CE irqs */
621 for (i = 0; i < ab->hw_params.ce_count; i++) {
622 msi_data = (i % msi_data_count) + msi_irq_start;
623 irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
624 ce_pipe = &ab->ce.ce_pipe[i];
626 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
629 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
631 tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
633 ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
634 IRQF_SHARED, irq_name[irq_idx],
637 ath11k_err(ab, "failed to request irq %d: %d\n",
642 ab->irq_num[irq_idx] = irq;
/* Keep masked until the start path enables CE interrupts. */
643 ath11k_pci_ce_irq_disable(ab, i);
646 ret = ath11k_pci_ext_irq_config(ab);
/* Populate the QMI CE configuration from hw_params (target CE map,
 * service-to-CE map, shadow registers) and select the QCA6390 QMI
 * service instance id.
 */
653 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
655 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
657 cfg->tgt_ce = ab->hw_params.target_ce_config;
658 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
660 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
661 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
662 ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;
664 ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
665 &cfg->shadow_reg_v2_len);
/* Enable IRQs for every CE pipe that uses interrupts. */
668 static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
672 for (i = 0; i < ab->hw_params.ce_count; i++) {
673 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
675 ath11k_pci_ce_irq_enable(ab, i);
/* Allocate exactly msi_config.total_vectors MSI vectors and record the
 * endpoint's MSI base data (needed to translate user base vectors) and
 * whether 64-bit MSI addressing is in use.
 * NOTE(review): the error value set under 'if (num_vectors >= 0)' is
 * not visible in this listing — presumably -EINVAL for a partial
 * allocation vs. propagating the negative num_vectors; confirm.
 */
679 static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
681 struct ath11k_base *ab = ab_pci->ab;
682 struct msi_desc *msi_desc;
686 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
687 msi_config.total_vectors,
688 msi_config.total_vectors,
690 if (num_vectors != msi_config.total_vectors) {
691 ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
692 msi_config.total_vectors, num_vectors);
694 if (num_vectors >= 0)
700 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
702 ath11k_err(ab, "msi_desc is NULL!\n");
704 goto free_msi_vector;
707 ab_pci->msi_ep_base_data = msi_desc->msg.data;
708 if (msi_desc->msi_attrib.is_64)
709 set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
711 ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
716 pci_free_irq_vectors(ab_pci->pdev);
/* Release the MSI vectors allocated in ath11k_pci_enable_msi(). */
721 static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
723 pci_free_irq_vectors(ab_pci->pdev);
/* Claim the PCI device: verify the device id, assign/enable/request
 * BAR0, set 32-bit streaming and coherent DMA masks, enable bus
 * mastering and iomap the BAR into ab->mem. Error labels unwind in
 * reverse order (labels dropped from this listing).
 * NOTE(review): pci_set_dma_mask()/pci_set_consistent_dma_mask() are
 * deprecated wrappers — newer kernels use dma_set_mask_and_coherent();
 * consider updating when the minimum kernel version allows.
 */
726 static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
728 struct ath11k_base *ab = ab_pci->ab;
732 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
733 if (device_id != ab_pci->dev_id) {
734 ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
735 device_id, ab_pci->dev_id);
740 ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
742 ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
746 ret = pci_enable_device(pdev);
748 ath11k_err(ab, "failed to enable pci device: %d\n", ret);
752 ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
754 ath11k_err(ab, "failed to request pci region: %d\n", ret);
758 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
760 ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
761 ATH11K_PCI_DMA_MASK, ret);
765 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
767 ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
768 ATH11K_PCI_DMA_MASK, ret);
772 pci_set_master(pdev);
774 ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
775 ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
777 ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
782 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
/* Error unwind: clear master, release region, disable device. */
786 pci_clear_master(pdev);
788 pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
790 pci_disable_device(pdev);
/* Undo ath11k_pci_claim(): unmap BAR0, clear bus mastering, release
 * the region and disable the device if still enabled.
 */
795 static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
797 struct ath11k_base *ab = ab_pci->ab;
798 struct pci_dev *pci_dev = ab_pci->pdev;
800 pci_iounmap(pci_dev, ab->mem);
802 pci_clear_master(pci_dev);
803 pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
804 if (pci_is_enabled(pci_dev))
805 pci_disable_device(pci_dev);
/* hif_ops power_up: reset the cached register window and INIT_DONE
 * flag, run the SW reset sequence with power_on semantics, then start
 * the MHI stack.
 */
808 static int ath11k_pci_power_up(struct ath11k_base *ab)
810 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
813 ab_pci->register_window = 0;
814 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
815 ath11k_pci_sw_reset(ab_pci->ab, true);
817 ret = ath11k_mhi_start(ab_pci);
819 ath11k_err(ab, "failed to start mhi: %d\n", ret);
/* hif_ops power_down: force the SoC awake so MHI can be stopped
 * cleanly, then run the non-power-on SW reset sequence.
 */
826 static void ath11k_pci_power_down(struct ath11k_base *ab)
828 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
830 ath11k_pci_force_wake(ab_pci->ab);
831 ath11k_mhi_stop(ab_pci);
832 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
833 ath11k_pci_sw_reset(ab_pci->ab, false);
/* Kill every CE pipe's tasklet (must run after the IRQs are disabled
 * and synchronized so no new schedules can occur).
 */
836 static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
840 for (i = 0; i < ab->hw_params.ce_count; i++) {
841 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
843 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
846 tasklet_kill(&ce_pipe->intr_tq);
/* hif_ops stop: disable + drain CE interrupts, kill the bottom halves,
 * then tear down the CE pipes.
 */
850 static void ath11k_pci_stop(struct ath11k_base *ab)
852 ath11k_pci_ce_irqs_disable(ab);
853 ath11k_pci_sync_ce_irqs(ab);
854 ath11k_pci_kill_tasklets(ab);
855 ath11k_ce_cleanup_pipes(ab);
/* hif_ops start: mark INIT_DONE (enables the MHI wakeup votes in the
 * register accessors), enable CE interrupts and post RX buffers.
 */
858 static int ath11k_pci_start(struct ath11k_base *ab)
860 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
862 set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
864 ath11k_pci_ce_irqs_enable(ab);
865 ath11k_ce_rx_post_buf(ab);
/* Resolve the UL and DL CE pipe numbers for a given HTC service id by
 * scanning hw_params.svc_to_ce_map. The switch dispatches on pipedir
 * (IN sets dl, OUT sets ul, INOUT sets both — case labels dropped from
 * this listing); WARNs and fails if either direction was never set.
 */
870 static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
871 u8 *ul_pipe, u8 *dl_pipe)
873 const struct service_to_pipe *entry;
874 bool ul_set = false, dl_set = false;
877 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
878 entry = &ab->hw_params.svc_to_ce_map[i];
880 if (__le32_to_cpu(entry->service_id) != service_id)
883 switch (__le32_to_cpu(entry->pipedir)) {
888 *dl_pipe = __le32_to_cpu(entry->pipenum);
893 *ul_pipe = __le32_to_cpu(entry->pipenum);
899 *dl_pipe = __le32_to_cpu(entry->pipenum);
900 *ul_pipe = __le32_to_cpu(entry->pipenum);
907 if (WARN_ON(!ul_set || !dl_set))
/* HIF operations table handed to the ath11k core in probe. */
913 static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
914 .start = ath11k_pci_start,
915 .stop = ath11k_pci_stop,
916 .read32 = ath11k_pci_read32,
917 .write32 = ath11k_pci_write32,
918 .power_down = ath11k_pci_power_down,
919 .power_up = ath11k_pci_power_up,
920 .irq_enable = ath11k_pci_ext_irq_enable,
921 .irq_disable = ath11k_pci_ext_irq_disable,
922 .get_msi_address = ath11k_pci_get_msi_address,
923 .get_user_msi_vector = ath11k_get_user_msi_assignment,
924 .map_service_to_pipe = ath11k_pci_map_service_to_pipe,
/* PCI probe: allocate the ath11k core, claim the device, detect the
 * SoC hardware revision, then bring up MSI, MHI, HAL SRNG, CE pipes,
 * interrupts and finally the core. Error labels unwind each stage in
 * reverse order.
 */
927 static int ath11k_pci_probe(struct pci_dev *pdev,
928 const struct pci_device_id *pci_dev)
930 struct ath11k_base *ab;
931 struct ath11k_pci *ab_pci;
932 u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
935 dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
937 ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
938 &ath11k_pci_bus_params);
940 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
944 ab->dev = &pdev->dev;
/* NOTE(review): pci_set_drvdata() is called again at original line
 * 951 below — the second call is redundant and one of the two could
 * be dropped.
 */
945 pci_set_drvdata(pdev, ab);
946 ab_pci = ath11k_pci_priv(ab);
947 ab_pci->dev_id = pci_dev->device;
950 ab->hif.ops = &ath11k_pci_hif_ops;
951 pci_set_drvdata(pdev, ab);
952 spin_lock_init(&ab_pci->window_lock);
954 ret = ath11k_pci_claim(ab_pci, pdev);
956 ath11k_err(ab, "failed to claim device: %d\n", ret);
/* Identify the SoC revision; only QCA6390 hw2.x is supported. */
960 switch (pci_dev->device) {
961 case QCA6390_DEVICE_ID:
962 soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
963 soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
965 soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
968 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
969 soc_hw_version_major, soc_hw_version_minor);
971 switch (soc_hw_version_major) {
973 ab->hw_rev = ATH11K_HW_QCA6390_HW20;
976 dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
977 soc_hw_version_major, soc_hw_version_minor);
979 goto err_pci_free_region;
983 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
986 goto err_pci_free_region;
989 ret = ath11k_pci_enable_msi(ab_pci);
991 ath11k_err(ab, "failed to enable msi: %d\n", ret);
992 goto err_pci_free_region;
995 ret = ath11k_core_pre_init(ab);
997 goto err_pci_disable_msi;
999 ret = ath11k_mhi_register(ab_pci);
1001 ath11k_err(ab, "failed to register mhi: %d\n", ret);
1002 goto err_pci_disable_msi;
1005 ret = ath11k_hal_srng_init(ab);
1007 goto err_mhi_unregister;
1009 ret = ath11k_ce_alloc_pipes(ab);
1011 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1012 goto err_hal_srng_deinit;
1015 ath11k_pci_init_qmi_ce_config(ab);
1017 ret = ath11k_pci_config_irq(ab);
1019 ath11k_err(ab, "failed to config irq: %d\n", ret);
1023 ret = ath11k_core_init(ab);
1025 ath11k_err(ab, "failed to init core: %d\n", ret);
/* Error unwind path, in reverse order of setup. */
1031 ath11k_pci_free_irq(ab);
1034 ath11k_ce_free_pipes(ab);
1036 err_hal_srng_deinit:
1037 ath11k_hal_srng_deinit(ab);
1040 ath11k_mhi_unregister(ab_pci);
1042 err_pci_disable_msi:
1043 ath11k_pci_disable_msi(ab_pci);
1045 err_pci_free_region:
1046 ath11k_pci_free_region(ab_pci);
1049 ath11k_core_free(ab);
/* PCI remove: if QMI never came up, just power down and tear down the
 * QMI/debugfs remnants; otherwise mark unregistering and unwind the
 * full probe sequence.
 */
1054 static void ath11k_pci_remove(struct pci_dev *pdev)
1056 struct ath11k_base *ab = pci_get_drvdata(pdev);
1057 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
1059 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1060 ath11k_pci_power_down(ab);
1061 ath11k_debugfs_soc_destroy(ab);
1062 ath11k_qmi_deinit_service(ab);
1066 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
1068 ath11k_core_deinit(ab);
1071 ath11k_mhi_unregister(ab_pci);
1073 ath11k_pci_free_irq(ab);
1074 ath11k_pci_disable_msi(ab_pci);
1075 ath11k_pci_free_region(ab_pci);
1077 ath11k_hal_srng_deinit(ab);
1078 ath11k_ce_free_pipes(ab);
1079 ath11k_core_free(ab);
/* PCI shutdown hook: power the device down so it is quiescent across
 * reboot/kexec.
 */
1082 static void ath11k_pci_shutdown(struct pci_dev *pdev)
1084 struct ath11k_base *ab = pci_get_drvdata(pdev);
1086 ath11k_pci_power_down(ab);
/* pci_driver registration glue. */
1089 static struct pci_driver ath11k_pci_driver = {
1090 .name = "ath11k_pci",
1091 .id_table = ath11k_pci_id_table,
1092 .probe = ath11k_pci_probe,
1093 .remove = ath11k_pci_remove,
1094 .shutdown = ath11k_pci_shutdown,
/* Module init: register the PCI driver, logging on failure. */
1097 static int ath11k_pci_init(void)
1101 ret = pci_register_driver(&ath11k_pci_driver);
1103 pr_err("failed to register ath11k pci driver: %d\n",
1108 module_init(ath11k_pci_init);
/* Module exit: unregister the PCI driver. */
1110 static void ath11k_pci_exit(void)
1112 pci_unregister_driver(&ath11k_pci_driver);
1117 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
1118 MODULE_LICENSE("Dual BSD/GPL");
1120 /* QCA639x 2.0 firmware files */
1121 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_BOARD_API2_FILE);
1122 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_AMSS_FILE);
1123 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_M3_FILE);