// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK]	= RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE]	= RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI]	= RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO]	= RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT]	= RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0]	= RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C]	= RTK_PCI_TXBD_IDX_H2CQ,
};

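/* Map a TX queue to the QSEL value carried in the TX descriptor; the
 * special queues get fixed QSEL values, data queues fall through to the
 * skb's priority.
 */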
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	writel(val, rtwpci->mmap + addr);
}

static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}

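/* TX skbs keep their DMA address in the rtw_pci_tx_data area of skb->cb
 * (see rtw_pci_get_tx_data()), so queued frames can be unmapped and freed
 * here without touching the hardware descriptors.
 */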
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remained in tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

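/* Map one RX skb for DMA and publish it to the hardware through its
 * buffer descriptor; the DMA address is cached in skb->cb so the buffer
 * can be synced and unmapped later.
 */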
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

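/* Allocate the RX descriptor ring plus one mapped skb per entry; on any
 * failure every buffer allocated so far is unwound before the ring memory
 * itself is released.
 */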
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
		return -EINVAL;
	}

	head = pci_zalloc_consistent(pdev, ring_sz, &dma);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	pci_free_consistent(pdev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VODOK |
			      IMR_VIDOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	dma_addr_t dma;
	u8 tmp;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
			BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue is rsvd page, does not have DMA interrupt
		 * H2C queue is managed by firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if there is any skb DMAing */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}

	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO]	= RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI]	= RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE]	= RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK]	= RTW_TX_QUEUE_BK,
};

static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

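/* Reserved pages travel through the beacon queue, and a new write first
 * releases whatever skb is still queued there from the previous write, so
 * the beacon queue effectively holds at most one reserved-page skb.
 */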
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	pci_unmap_single(rtwpci->pdev, dma, prev->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(prev);
}

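/* With BIT_RX_TAG_EN set (see rtw_pci_dma_reset()), the device is assumed
 * to write an incrementing tag back into each completed RX buffer
 * descriptor; comparing it against the driver's expected counter catches
 * descriptors that DMA has not actually updated yet.
 */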
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u8 queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

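/* avail_desc() and get_tx_buffer_desc() come from pci.h. As a rough,
 * assumed sketch of the ring math: with write pointer wp, read pointer rp
 * and ring length len, the number of free buffer descriptors is
 * (rp > wp) ? rp - wp - 1 : len - wp + rp - 1, i.e. one slot is always
 * kept unused so that wp == rp unambiguously means "empty". Each frame
 * consumes one two-segment buffer descriptor: segment 0 points at the TX
 * packet descriptor, segment 1 at the payload.
 */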
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(rtwpci->pdev, dma))
		return -EBUSY;

	/* after this we got dma mapped, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}

	return 0;
}

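/* TX completion: the hardware read pointer (high half of the BD index
 * register) tells how many descriptors were consumed since the last
 * interrupt; each completed skb is unmapped and its status reported to
 * mac80211, while H2C command packets are simply freed.
 */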
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
				 PCI_DMA_TODEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

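/* RX completion copies each received frame out of the DMA buffer into a
 * freshly allocated skb and immediately re-arms the original buffer for
 * the device, so ring entries are never handed to mac80211 directly.
 */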
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_ring *ring;
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_wp, cur_rp, tmp;
	u32 count;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];

	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = tmp >> 16;
	cur_wp &= TRX_BD_IDX_MASK;
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	cur_rp = ring->r.rp;
	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_irqsafe(rtwdev->hw, new);
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	ring->r.wp = cur_wp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* disable RTW PCI interrupt to avoid more interrupts before the end of
	 * the thread function
	 *
	 * disable HIMR here to also avoid new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK)
		rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);

	/* all of the jobs for this interrupt have been done */
	rtw_pci_enable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

	len = pci_resource_len(pdev, bar_id);
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		rtw_err(rtwdev, "failed to map pci memory\n");
		pci_release_regions(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

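/* DBI reads/writes go through an indirect register interface
 * (REG_DBI_*_V1) and poll a completion flag; they are used below to access
 * the device's PCIe link registers such as RTK_PCIE_LINK_CFG from the MAC
 * side.
 */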
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

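/* MDIO writes program the PCIe PHY: the address space is split into pages
 * (gen1 and gen2 parameters use separate page offsets), and completion is
 * polled via BIT_MDIO_WFLAG_V1.
 */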
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when driver/firmware
	 * enters power save mode, without heavy traffic, because we've
	 * experienced interoperability issues where the link tends to enter
	 * L1 state on the fly even while the driver is pushing high
	 * throughput. This is probably because ASPM behavior varies slightly
	 * between SoCs.
	 */
	if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
	 * added clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though the standard PCIE configuration space can set the link
	 * control register, by Realtek's design the driver should check
	 * whether the host supports CLKREQ/ASPM before enabling the HW
	 * module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * is responsible for accessing PCIE configuration space to follow
	 * the host settings, and the other is in charge of the actual
	 * CLKREQ/ASPM mechanisms; the latter is disabled by default, because
	 * sometimes the host does not support them, and wrong settings
	 * (e.g. CLKREQ# not bi-directional) could lead to device loss if the
	 * HW misbehaves on the link.
	 *
	 * Hence the driver is designed to first check that the PCIE
	 * configuration space is sync'ed and enabled, and only then turn on
	 * the module that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this driver can access to hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}

static void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}

static const struct pci_device_id rtw_pci_id_table[] = {
#ifdef CONFIG_RTW88_8822BE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
#endif
#ifdef CONFIG_RTW88_8822CE
	{ RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
#endif
	{},
};
MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);

static struct pci_driver rtw_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtw_pci_id_table,
	.probe = rtw_pci_probe,
	.remove = rtw_pci_remove,
	.driver.pm = &rtw_pm_ops,
};
module_pci_driver(rtw_pci_driver);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");