// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"

#define MTK_PCIE_BASE(n)	(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE	1900
#define MTK_WED_BUF_SIZE	2048
#define MTK_WED_BUF_PER_PAGE	(PAGE_SIZE / 2048)

#define MTK_WED_TX_RING_SIZE	2048
#define MTK_WED_WDMA_RING_SIZE	1024

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	return wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;

	hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
	if (!hw || hw->wed_dev)
		return NULL;

	hw->wed_dev = dev;
	return hw;
}

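/*
 * Set up the TX buffer pool shared with the WLAN driver: dev->wlan.nbuf
 * buffers of MTK_WED_BUF_SIZE bytes, packed MTK_WED_BUF_PER_PAGE per page,
 * with one WDMA descriptor each. Every buffer is pre-filled with a WLAN TX
 * descriptor through the driver-supplied init_buf() callback and assigned a
 * token ID, starting from dev->wlan.token_start.
 */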
static int
mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	/* round down to a whole multiple of buffers per page */
	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->buf_ring.size = ring_size;
	dev->buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->buf_ring.desc = desc;
	dev->buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;
			u32 ctrl;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);

			desc->buf0 = cpu_to_le32(buf_phys);
			desc->buf1 = cpu_to_le32(buf_phys + txd_size);
			ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
			       FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
					  MTK_WED_BUF_SIZE - txd_size) |
			       MTK_WDMA_DESC_CTRL_LAST_SEG1;
			desc->ctrl = cpu_to_le32(ctrl);
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
	void **page_list = dev->buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];
		dma_addr_t buf_addr;

		if (!page)
			break;

		buf_addr = le32_to_cpu(desc[i].buf0);
		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
			  desc, dev->buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
}

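/*
 * Enable/disable the WED error ("extra") interrupt sources. The
 * token-without-payload error is masked out while no flows are offloaded,
 * where it is presumably expected noise; mtk_wed_irq_get() filters it the
 * same way.
 */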
static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK); /* read back to flush the write */
}

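/*
 * Quiesce the WED core: disable the PCIe interrupt mirror, mask every
 * interrupt agent and trigger, and stop the WED, WPDMA and WDMA DMA engines.
 */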
static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
	mtk_wed_set_ext_int(dev, false);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}

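/*
 * Undo mtk_wed_attach(): stop the hardware, reset the WDMA RX path and the
 * WED block, release the buffer pool and rings, and hand DMA duties back to
 * the ethernet device if no other WED instance is still active.
 */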
static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
	struct mtk_wed_hw *hw = dev->hw;

	mutex_lock(&hw_lock);

	mtk_wed_stop(dev);

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mtk_wed_free_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (of_dma_is_coherent(wlan_node))
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), BIT(hw->index));

	if (!hw_list[!hw->index]->wed_dev &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
	mutex_unlock(&hw_lock);
}

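/*
 * Early per-attach init: reset the WED block, tune WDMA descriptor recycling,
 * and program the register address translation windows. The MTK_WED_WDMA_OFFSET*
 * and PCIe/WPDMA base constants appear to come straight from the SoC register
 * map.
 */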
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;
	u32 offset;

	mtk_wed_stop(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);

	offset = dev->hw->index ? 0x04000400 : 0;
	wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
	wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);

	wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
}

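/*
 * First-start init of the TX buffer manager: group sizing, token ID range,
 * buffer length and dynamic threshold. Runs once per attach, guarded by
 * dev->init_done.
 */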
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_TKID,
		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
			   dev->wlan.token_start) |
		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
			   dev->wlan.token_start + dev->wlan.nbuf - 1));

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
		FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
		MTK_WED_TX_BM_DYN_THR_HI);

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

static void
mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		desc[i].buf0 = 0;
		desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		desc[i].buf1 = 0;
		desc[i].info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev)
{
	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) &
	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
		return true;

	return false;
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev);
}

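/*
 * Reset the DMA paths. If the engines drained cleanly, a cheap ring-index
 * reset is enough; if they are still busy after polling, fall back to full
 * block resets of the affected agents.
 */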
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;

		if (!desc)
			continue;

		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
	}

	if (mtk_wed_poll_busy(dev))
		busy = mtk_wed_check_busy(dev);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX,
			MTK_WED_RESET_IDX_TX |
			MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);

		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	/* wait for the token FIFO to refill completely (0x40 entries) */
	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->size = size;
	mtk_wed_ring_reset(ring->desc, size);

	return 0;
}

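/*
 * Allocate a WDMA RX ring and program its base/count into both the WDMA
 * hardware and WED's shadow ring registers. The ring is always allocated at
 * MTK_WED_WDMA_RING_SIZE entries, while the programmed count may be smaller
 * (mtk_wed_start() uses 16-entry rings for otherwise unused slots).
 */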
static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

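/*
 * Bring the WED instance live: make sure every WDMA ring slot is populated,
 * run the deferred hw init, wire up interrupt agents and triggers, enable
 * the DMA engines, and point the PCIe interrupt mirror at this instance.
 */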
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		if (!dev->tx_wdma[i].desc)
			mtk_wed_wdma_ring_setup(dev, i, 16);

	wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	mtk_wed_hw_init(dev);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
		MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
		MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

	wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
		MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
	wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	mtk_wed_set_ext_int(dev, true);
	val = dev->wlan.wpdma_phys |
	      MTK_PCIE_MIRROR_MAP_EN |
	      FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);

	if (dev->hw->index)
		val |= BIT(1);
	val |= BIT(0);
	regmap_write(dev->hw->mirror, dev->hw->index * 4, val);

	dev->running = true;
}

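/*
 * Called by the WLAN driver through the rcu-protected mtk_soc_wed_ops table;
 * enters with the RCU read lock held and drops it (hence __releases below).
 * Binds the wlan device to a free WED instance and performs the early
 * hardware setup.
 */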
static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto out;
	}

	dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_buffer_alloc(dev);
	if (ret) {
		mtk_wed_detach(dev);
		goto out;
	}

	mtk_wed_hw_init_early(dev);
	regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));

	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
		return -ENOMEM;

	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED
	 */
	ring->reg_base = MTK_WED_RING_RX(1);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
	}

	return 0;
}

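/*
 * Read and acknowledge the pending interrupt status. Error sources are
 * logged (rate-limited) and cleared here rather than reported to the caller;
 * only bits covered by @mask are returned.
 */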
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

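/*
 * Called from the mtk_eth_soc probe path for each WED node: look up the WED
 * platform device, its syscon regmap, IRQ and the pcie-mirror/hifsys syscons,
 * publish the ops table via mtk_soc_wed_ops and register the instance in
 * hw_list.
 */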
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		return;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,pcie-mirror");
	hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,hifsys");
	if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
		kfree(hw);
		goto unlock;
	}

	if (!index) {
		regmap_write(hw->mirror, 0, 0);
		regmap_write(hw->mirror, 4, 0);
	}
	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

unlock:
	mutex_unlock(&hw_lock);
}

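/*
 * Module teardown: unpublish the ops pointer, then wait out an RCU grace
 * period so concurrent readers of mtk_soc_wed_ops are done before the hw
 * state is freed.
 */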
void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		kfree(hw);
	}
}