// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include "mt7921.h"
#include "../dma.h"
#include "mac.h"

int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
{
	int i, err;

	err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	/* all data TX queues share the single BAND0 hardware ring */
	for (i = 0; i <= MT_TXQ_PSD; i++)
		phy->mt76->q_tx[i] = phy->mt76->q_tx[0];

	return 0;
}

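/* The NAPI handlers below take a runtime-PM reference before touching the
 * hardware; when the device is asleep they complete immediately and defer
 * the wake-up to pm.wake_work instead of accessing registers.
 */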
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;

	dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	mt7921_mcu_tx_cleanup(dev);
	if (napi_complete(napi))
		mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return 0;
}

static int mt7921_poll_rx(struct napi_struct *napi, int budget)
{
	struct mt7921_dev *dev;
	int done;

	dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev);

	if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
		napi_complete(napi);
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return 0;
	}

	done = mt76_dma_rx_poll(napi, budget);
	mt76_connac_pm_unref(&dev->mphy, &dev->pm);

	return done;
}

static void mt7921_dma_prefetch(struct mt7921_dev *dev)
{
#define PREFETCH(base, depth)	((base) << 16 | (depth))
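
	/* PREFETCH() packs the prefetch FIFO base (bits 31:16) and depth
	 * (bits 15:0) into a single EXT_CTRL word, e.g. PREFETCH(0x140, 0x4)
	 * evaluates to 0x01400004.
	 */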
	mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL, PREFETCH(0x0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL, PREFETCH(0x40, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING3_EXT_CTRL, PREFETCH(0x80, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING4_EXT_CTRL, PREFETCH(0xc0, 0x4));
	mt76_wr(dev, MT_WFDMA0_RX_RING5_EXT_CTRL, PREFETCH(0x100, 0x4));

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, PREFETCH(0x140, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING1_EXT_CTRL, PREFETCH(0x180, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING2_EXT_CTRL, PREFETCH(0x1c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING3_EXT_CTRL, PREFETCH(0x200, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING4_EXT_CTRL, PREFETCH(0x240, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING5_EXT_CTRL, PREFETCH(0x280, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING6_EXT_CTRL, PREFETCH(0x2c0, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING16_EXT_CTRL, PREFETCH(0x340, 0x4));
	mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}

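/* Translate a chip (bus-view) register address into its offset inside the
 * host register window.  fixed_map entries are remapped linearly, e.g.
 * 0x820e4010 lands at 0x21010 (WF_TMAC BN0), a few larger ranges fall back
 * to the dynamic L1 remap, and everything else is rejected.
 */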
static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
{
	static const struct {
		u32 phys;
		u32 mapped;
		u32 size;
	} fixed_map[] = {
		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
		{ 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
		{ 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
		{ 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
		{ 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
		{ 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
		{ 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
		{ 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
		{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
		{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
		{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
		{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
		{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
		{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
		{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
		{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
		{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
		{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
		{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
		{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
		{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
		{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
		{ 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
		{ 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
		{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
		{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
		{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
		{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
		{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
		{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
		{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
		{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
		{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
		{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
		{ 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
		{ 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
		{ 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
		{ 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
		{ 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
		{ 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
		{ 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
		{ 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
		{ 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
	};
	int i;

	if (addr < 0x100000)
		return addr;

	for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
		u32 ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		if (ofs > fixed_map[i].size)
			continue;

		return fixed_map[i].mapped + ofs;
	}

	if ((addr >= 0x18000000 && addr < 0x18c00000) ||
	    (addr >= 0x70000000 && addr < 0x78000000) ||
	    (addr >= 0x7c000000 && addr < 0x7c400000))
		return mt7921_reg_map_l1(dev, addr);

	dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
		addr);

	return 0;
}

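/* Bus accessor wrappers installed over the original bus ops in
 * mt7921_dma_init(): every read/write/rmw issued through mt76 is run through
 * __mt7921_reg_addr() first, so callers may use chip addresses directly.
 */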
static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	return dev->bus_ops->rr(mdev, addr);
}

static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	dev->bus_ops->wr(mdev, addr, val);
}

static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	u32 addr = __mt7921_reg_addr(dev, offset);

	return dev->bus_ops->rmw(mdev, addr, mask, val);
}

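/* Disable the WFDMA0 engines and wait for them to go idle.  When @force is
 * set, the DMA logic and DMASHDL are additionally pulled through a full
 * reset before the engines are disabled.
 */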
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
	if (force) {
		/* reset */
		mt76_clear(dev, MT_WFDMA0_RST,
			   MT_WFDMA0_RST_DMASHDL_ALL_RST |
			   MT_WFDMA0_RST_LOGIC_RST);

		mt76_set(dev, MT_WFDMA0_RST,
			 MT_WFDMA0_RST_DMASHDL_ALL_RST |
			 MT_WFDMA0_RST_LOGIC_RST);
	}

	/* disable dmashdl */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
		   MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
	mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);

	/* disable WFDMA0 */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
		       MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
		       MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
		return -ETIMEDOUT;

	return 0;
}

static int mt7921_dma_enable(struct mt7921_dev *dev)
{
	/* configure prefetch settings */
	mt7921_dma_prefetch(dev);

	/* reset dma idx */
	mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);

	/* configure delay interrupt */
	mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
		 MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
		 MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
		 MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		 MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		 MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);

	/* enable interrupts for TX/RX rings */
	mt7921_irq_enable(dev,
			  MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			  MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	return 0;
}

static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	err = mt7921_dma_disable(dev, force);
	if (err)
		return err;

	/* reset hw queues */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_reset(dev, dev->mphy.q_tx[i]);

	for (i = 0; i < __MT_MCUQ_MAX; i++)
		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);

	mt76_tx_status_check(&dev->mt76, true);

	return mt7921_dma_enable(dev);
}

int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
	mt76_clear(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);
	msleep(50);
	mt76_set(dev, MT_WFSYS_SW_RST_B, WFSYS_SW_RST_B);

	if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
			      WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
		return -ETIMEDOUT;

	return 0;
}

int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force)
{
	int i, err;

	/* clean up hw queues */
	for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
		mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);

	if (force) {
		err = mt7921_wfsys_reset(dev);
		if (err)
			return err;
	}
	err = mt7921_dma_reset(dev, force);
	if (err)
		return err;

	mt76_for_each_q_rx(&dev->mt76, i)
		mt76_queue_rx_reset(dev, i);

	return 0;
}

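/* Reinitialize WPDMA only when the device indicates it is needed, e.g. after
 * a deep-sleep cycle; mt7921_dma_need_reinit() is assumed here to test the
 * MT_WFDMA_NEED_REINIT marker maintained by mt7921_dma_enable().
 */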
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
{
	struct mt76_connac_pm *pm = &dev->pm;
	int err;

	/* check if the wpdma must be reinitialized */
	if (mt7921_dma_need_reinit(dev)) {
		/* disable interrupts */
		mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

		err = mt7921_wpdma_reset(dev, false);
		if (err) {
			dev_err(dev->mt76.dev, "wpdma reset failed\n");
			return err;
		}

		/* enable interrupts */
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		pm->stats.lp_wake++;
	}

	return 0;
}

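/* One-time DMA bring-up: install the address-remapping bus ops, reset WFDMA
 * and WFSYS, allocate the TX/MCU/FWDL/RX rings, register NAPI and finally
 * enable the DMA engines.
 */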
int mt7921_dma_init(struct mt7921_dev *dev)
{
	struct mt76_bus_ops *bus_ops;
	int ret;

	dev->phy.mt76 = &dev->mt76.phy;
	dev->mt76.phy.priv = &dev->phy;
	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;

	bus_ops->rr = mt7921_rr;
	bus_ops->wr = mt7921_wr;
	bus_ops->rmw = mt7921_rmw;
	dev->mt76.bus = bus_ops;

	mt76_dma_attach(&dev->mt76);

	ret = mt7921_dma_disable(dev, true);
	if (ret)
		return ret;

	ret = mt7921_wfsys_reset(dev);
	if (ret)
		return ret;

	/* init tx queue */
	ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
				    MT7921_TX_RING_SIZE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4);

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM,
				  MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL,
				  MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* event from WM before firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
	if (ret)
		return ret;

	/* Change mcu queue after firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
	if (ret)
		return ret;

	/* rx data queue */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt7921_poll_rx);
	if (ret < 0)
		return ret;

	netif_tx_napi_add(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt7921_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return mt7921_dma_enable(dev);
}

void mt7921_dma_cleanup(struct mt7921_dev *dev)
{
	/* disable */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_RX_DMA_EN |
		   MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
		   MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
		   MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);

	/* reset */
	mt76_clear(dev, MT_WFDMA0_RST,
		   MT_WFDMA0_RST_DMASHDL_ALL_RST |
		   MT_WFDMA0_RST_LOGIC_RST);

	mt76_set(dev, MT_WFDMA0_RST,
		 MT_WFDMA0_RST_DMASHDL_ALL_RST |
		 MT_WFDMA0_RST_LOGIC_RST);

	mt76_dma_cleanup(&dev->mt76);
}