// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/debugfs.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include "mtk_eth_soc.h"
#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"

#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)

#define MTK_WED_PKT_SIZE		1900
#define MTK_WED_BUF_SIZE		2048
#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)

#define MTK_WED_TX_RING_SIZE		2048
#define MTK_WED_WDMA_RING_SIZE		1024

static struct mtk_wed_hw *hw_list[2];
static DEFINE_MUTEX(hw_lock);

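/* Register access helpers: the wed_* accessors go through the WED regmap,
 * the wdma_* accessors touch the WDMA register window directly. The *_m32
 * variants perform a masked read-modify-write; *_set and *_clr set or
 * clear the given bits.
 */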
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
}

static void
wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, 0, mask);
}

static void
wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wed_m32(dev, reg, mask, 0);
}

static void
wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
}

static void
wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
{
	wdma_m32(dev, reg, 0, mask);
}

static u32
mtk_wed_read_reset(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_RESET);
}

static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;

	hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
	if (!hw || hw->wed_dev)
		return NULL;

	hw->wed_dev = dev;
	return hw;
}

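/* Allocate the TX buffer pool handed to the WED hardware: one descriptor
 * per buffer, MTK_WED_BUF_SIZE bytes each, MTK_WED_BUF_PER_PAGE buffers
 * packed into each page. Each buffer starts with a WLAN TX descriptor
 * filled in by the wlan.init_buf() callback.
 */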
static int
mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc;
	dma_addr_t desc_phys;
	void **page_list;
	int token = dev->wlan.token_start;
	int ring_size;
	int n_pages;
	int i, page_idx;

	ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->buf_ring.size = ring_size;
	dev->buf_ring.pages = page_list;

	desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->buf_ring.desc = desc;
	dev->buf_ring.desc_phys = desc_phys;

	for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_pages(GFP_KERNEL, 0);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx++] = page;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
		buf = page_to_virt(page);
		buf_phys = page_phys;

		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			u32 txd_size;

			txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
			desc->buf0 = buf_phys;
			desc->buf1 = buf_phys + txd_size;
			desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
						txd_size) |
				     FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
						MTK_WED_BUF_SIZE - txd_size) |
				     MTK_WDMA_DESC_CTRL_LAST_SEG1;
			desc->info = 0;
			desc++;

			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wdma_desc *desc = dev->buf_ring.desc;
	void **page_list = dev->buf_ring.pages;
	int page_idx;
	int i;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
		void *page = page_list[page_idx++];

		if (!page)
			break;

		dma_unmap_page(dev->hw->dev, desc[i].buf0,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
			  desc, dev->buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
}

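/* Enable or disable the extra (error) interrupt sources. The
 * TKID_WO_PYLD condition is masked out while no offload flows are
 * installed.
 */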
static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK); /* read back to flush the write */
}

static void
mtk_wed_stop(struct mtk_wed_device *dev)
{
	regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
	mtk_wed_set_ext_int(dev, false);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
	struct mtk_wed_hw *hw = dev->hw;

	mutex_lock(&hw_lock);

	mtk_wed_stop(dev);

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mtk_wed_free_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (of_dma_is_coherent(wlan_node))
		regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
				   BIT(hw->index), BIT(hw->index));

	if (!hw_list[!hw->index]->wed_dev &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
	mutex_unlock(&hw_lock);
}

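/* Early hardware setup performed at attach time: reset the WED block and
 * program the WDMA/WPDMA address offsets before any rings exist. The
 * remaining one-time init runs from mtk_wed_hw_init() on the first start.
 */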
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 mask, set;
	u32 offset;

	mtk_wed_stop(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
	       MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
	       MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
	set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
	      MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
	      MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);

	offset = dev->hw->index ? 0x04000400 : 0;
	wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
	wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);

	wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
}

static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);
	wed_w32(dev, MTK_WED_TX_BM_CTRL,
		MTK_WED_TX_BM_CTRL_PAUSE |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
			   dev->buf_ring.size / 128) |
		FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
			   MTK_WED_TX_RING_SIZE / 256));

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);

	wed_w32(dev, MTK_WED_TX_BM_TKID,
		FIELD_PREP(MTK_WED_TX_BM_TKID_START,
			   dev->wlan.token_start) |
		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
			   dev->wlan.token_start + dev->wlan.nbuf - 1));

	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
		FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
		MTK_WED_TX_BM_DYN_THR_HI);

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
}

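/* Initialize every descriptor in a ring as DMA-done, so the ring comes
 * up empty and no stale entry is treated as pending work.
 */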
static void
mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
{
	int i;

	for (i = 0; i < size; i++) {
		desc[i].buf0 = 0;
		desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		desc[i].buf1 = 0;
		desc[i].info = 0;
	}
}

static u32
mtk_wed_check_busy(struct mtk_wed_device *dev)
{
	if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
	    MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
	    MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
		return true;

	if (wed_r32(dev, MTK_WED_CTRL) &
	    (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
		return true;

	return false;
}

static int
mtk_wed_poll_busy(struct mtk_wed_device *dev)
{
	int sleep = 15000;
	int timeout = 100 * sleep;
	u32 val;

	return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
				 timeout, false, dev);
}

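/* Quiesce and reset the DMA paths. If any engine is still busy after
 * polling, fall back to a full block reset of that engine; otherwise a
 * lightweight ring-index reset is sufficient.
 */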
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;

		if (!desc)
			continue;

		mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
	}

	if (mtk_wed_poll_busy(dev))
		busy = mtk_wed_check_busy(dev);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
	} else {
		wed_w32(dev, MTK_WED_RESET_IDX,
			MTK_WED_RESET_IDX_TX |
			MTK_WED_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
			MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
		wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);

		wed_set(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
	}

	for (i = 0; i < 100; i++) {
		val = wed_r32(dev, MTK_WED_TX_BM_INTF);
		if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
			break;
	}

	mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (busy) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
	} else {
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
			MTK_WED_WPDMA_RESET_IDX_TX |
			MTK_WED_WPDMA_RESET_IDX_RX);
		wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
	}
}

static int
mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		   int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->size = size;
	mtk_wed_ring_reset(ring->desc, size);

	return 0;
}

static int
mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
	struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];

	if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		 wdma->desc_phys);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		 size);
	wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
		wdma->desc_phys);
	wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
		size);

	return 0;
}

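/* Bring up the WED data path: make sure all WDMA RX rings exist, run the
 * one-time hardware init, then enable the interrupt agents, the DMA
 * engines and the PCIe interrupt mirror.
 */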
static void
mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
{
	u32 wdma_mask;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		if (!dev->tx_wdma[i].desc)
			mtk_wed_wdma_ring_setup(dev, i, 16);

	wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));

	mtk_wed_hw_init(dev);

	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);

	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
		MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
		MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);

	wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
		MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);

	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
	wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);

	wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);

	wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
	wed_w32(dev, MTK_WED_INT_MASK, irq_mask);

	wed_set(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);
	wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
	wed_set(dev, MTK_WED_WDMA_GLO_CFG,
		MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	mtk_wed_set_ext_int(dev, true);
	val = dev->wlan.wpdma_phys |
	      MTK_PCIE_MIRROR_MAP_EN |
	      FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);

	if (dev->hw->index)
		val |= BIT(1);
	val |= BIT(0);
	regmap_write(dev->hw->mirror, dev->hw->index * 4, val);

	dev->running = true;
}

static int
mtk_wed_attach(struct mtk_wed_device *dev)
	__releases(RCU)
{
	struct mtk_wed_hw *hw;
	int ret = 0;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "mtk_wed_attach without holding the RCU read lock");

	if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
	    !try_module_get(THIS_MODULE))
		ret = -ENODEV;

	rcu_read_unlock();

	if (ret)
		return ret;

	mutex_lock(&hw_lock);

	hw = mtk_wed_assign(dev);
	if (!hw) {
		module_put(THIS_MODULE);
		ret = -ENODEV;
		goto out;
	}

	dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);

	dev->hw = hw;
	dev->dev = hw->dev;
	dev->irq = hw->irq;
	dev->wdma_idx = hw->index;

	if (hw->eth->dma_dev == hw->eth->dev &&
	    of_dma_is_coherent(hw->eth->dev->of_node))
		mtk_eth_set_dma_device(hw->eth, hw->dev);

	ret = mtk_wed_buffer_alloc(dev);
	if (ret) {
		mtk_wed_detach(dev);
		goto out;
	}

	mtk_wed_hw_init_early(dev);
	regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

static int
mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->tx_ring[idx];

	/*
	 * Tx ring redirection:
	 * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
	 * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
	 * registers.
	 *
	 * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
	 * into MTK_WED_WPDMA_RING_TX(n) registers.
	 * It gets filled with packets picked up from WED TX ring and from
	 * WDMA RX.
	 */

	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));

	if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
		return -ENOMEM;

	if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
		return -ENOMEM;

	ring->reg_base = MTK_WED_RING_TX(idx);
	ring->wpdma = regs;

	/* WED -> WPDMA */
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
	wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);

	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
		ring->desc_phys);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
		MTK_WED_TX_RING_SIZE);
	wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);

	return 0;
}

static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
	struct mtk_wed_ring *ring = &dev->txfree_ring;
	int i;

	/*
	 * For txfree event handling, the same DMA ring is shared between WED
	 * and WLAN. The WLAN driver accesses the ring index registers through
	 * WED.
	 */
	ring->reg_base = MTK_WED_RING_RX(1);
	ring->wpdma = regs;

	for (i = 0; i < 12; i += 4) {
		u32 val = readl(regs + i);

		wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
		wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
	}

	return 0;
}

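/* Read and acknowledge the pending interrupt status. Error conditions
 * are logged (rate limited); the masked normal interrupt status is
 * returned to the caller.
 */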
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
	u32 val;

	val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
	wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
	val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
	if (!dev->hw->num_flows)
		val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
	if (val && net_ratelimit())
		pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);

	val = wed_r32(dev, MTK_WED_INT_STATUS);
	val &= mask;
	wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */

	return val;
}

static void
mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
{
	if (!dev->running)
		return;

	mtk_wed_set_ext_int(dev, !!mask);
	wed_w32(dev, MTK_WED_INT_MASK, mask);
}

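/* Flow offload refcounting, called from the flow offload code. The WLAN
 * driver's offload_enable callback runs when the first flow is added,
 * offload_disable when the last one is removed.
 */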
int mtk_wed_flow_add(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];
	int ret;

	if (!hw || !hw->wed_dev)
		return -ENODEV;

	if (hw->num_flows) {
		hw->num_flows++;
		return 0;
	}

	mutex_lock(&hw_lock);
	if (!hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	if (!hw)
		return;

	if (--hw->num_flows)
		return;

	mutex_lock(&hw_lock);
	if (!hw->wed_dev)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

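/* Register a WED hardware instance; called from the mtk_eth_soc probe
 * path with the WED device tree node, the ethernet driver state and a
 * mapped WDMA register window.
 */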
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		return;

	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,pcie-mirror");
	hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
						     "mediatek,hifsys");
	if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
		kfree(hw);
		goto unlock;
	}

	if (!index) {
		regmap_write(hw->mirror, 0, 0);
		regmap_write(hw->mirror, 4, 0);
	}
	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

unlock:
	mutex_unlock(&hw_lock);
}

void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		kfree(hw);
	}
}