1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019  Realtek Corporation
3  */
4
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include "main.h"
8 #include "pci.h"
9 #include "reg.h"
10 #include "tx.h"
11 #include "rx.h"
12 #include "fw.h"
13 #include "ps.h"
14 #include "debug.h"
15
16 static bool rtw_disable_msi;
17 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
18 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
19
20 static u32 rtw_pci_tx_queue_idx_addr[] = {
21         [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
22         [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
23         [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
24         [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
25         [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
26         [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
27         [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
28 };
29
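/* Map a software TX queue to the queue-selection (QSEL) value carried in the
 * TX descriptor; normal data queues simply reuse the skb priority.
 */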
30 static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
31 {
32         switch (queue) {
33         case RTW_TX_QUEUE_BCN:
34                 return TX_DESC_QSEL_BEACON;
35         case RTW_TX_QUEUE_H2C:
36                 return TX_DESC_QSEL_H2C;
37         case RTW_TX_QUEUE_MGMT:
38                 return TX_DESC_QSEL_MGMT;
39         case RTW_TX_QUEUE_HI0:
40                 return TX_DESC_QSEL_HIGH;
41         default:
42                 return skb->priority;
43         }
44 }
45
46 static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
47 {
48         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
49
50         return readb(rtwpci->mmap + addr);
51 }
52
53 static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
54 {
55         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
56
57         return readw(rtwpci->mmap + addr);
58 }
59
60 static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
61 {
62         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
63
64         return readl(rtwpci->mmap + addr);
65 }
66
67 static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
68 {
69         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
70
71         writeb(val, rtwpci->mmap + addr);
72 }
73
74 static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
75 {
76         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
77
78         writew(val, rtwpci->mmap + addr);
79 }
80
81 static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
82 {
83         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
84
85         writel(val, rtwpci->mmap + addr);
86 }
87
88 static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
89 {
90         int offset = tx_ring->r.desc_size * idx;
91
92         return tx_ring->r.head + offset;
93 }
94
95 static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
96                                       struct rtw_pci_tx_ring *tx_ring)
97 {
98         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
99         struct rtw_pci_tx_data *tx_data;
100         struct sk_buff *skb, *tmp;
101         dma_addr_t dma;
102
103         /* free every skb remaining in the tx list */
104         skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
105                 __skb_unlink(skb, &tx_ring->queue);
106                 tx_data = rtw_pci_get_tx_data(skb);
107                 dma = tx_data->dma;
108
109                 pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
110                 dev_kfree_skb_any(skb);
111         }
112 }
113
114 static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
115                                  struct rtw_pci_tx_ring *tx_ring)
116 {
117         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
118         u8 *head = tx_ring->r.head;
119         u32 len = tx_ring->r.len;
120         int ring_sz = len * tx_ring->r.desc_size;
121
122         rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
123
124         /* free the ring itself */
125         pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
126         tx_ring->r.head = NULL;
127 }
128
129 static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
130                                       struct rtw_pci_rx_ring *rx_ring)
131 {
132         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
133         struct sk_buff *skb;
134         int buf_sz = RTK_PCI_RX_BUF_SIZE;
135         dma_addr_t dma;
136         int i;
137
138         for (i = 0; i < rx_ring->r.len; i++) {
139                 skb = rx_ring->buf[i];
140                 if (!skb)
141                         continue;
142
143                 dma = *((dma_addr_t *)skb->cb);
144                 pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
145                 dev_kfree_skb(skb);
146                 rx_ring->buf[i] = NULL;
147         }
148 }
149
150 static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
151                                  struct rtw_pci_rx_ring *rx_ring)
152 {
153         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
154         u8 *head = rx_ring->r.head;
155         int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
156
157         rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
158
159         pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
160 }
161
162 static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
163 {
164         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
165         struct rtw_pci_tx_ring *tx_ring;
166         struct rtw_pci_rx_ring *rx_ring;
167         int i;
168
169         for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
170                 tx_ring = &rtwpci->tx_rings[i];
171                 rtw_pci_free_tx_ring(rtwdev, tx_ring);
172         }
173
174         for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
175                 rx_ring = &rtwpci->rx_rings[i];
176                 rtw_pci_free_rx_ring(rtwdev, rx_ring);
177         }
178 }
179
180 static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
181                                 struct rtw_pci_tx_ring *tx_ring,
182                                 u8 desc_size, u32 len)
183 {
184         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
185         int ring_sz = desc_size * len;
186         dma_addr_t dma;
187         u8 *head;
188
189         if (len > TRX_BD_IDX_MASK) {
190                 rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
191                 return -EINVAL;
192         }
193
194         head = pci_zalloc_consistent(pdev, ring_sz, &dma);
195         if (!head) {
196                 rtw_err(rtwdev, "failed to allocate tx ring\n");
197                 return -ENOMEM;
198         }
199
200         skb_queue_head_init(&tx_ring->queue);
201         tx_ring->r.head = head;
202         tx_ring->r.dma = dma;
203         tx_ring->r.len = len;
204         tx_ring->r.desc_size = desc_size;
205         tx_ring->r.wp = 0;
206         tx_ring->r.rp = 0;
207
208         return 0;
209 }
210
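/* Map the skb data buffer for device DMA, remember the DMA address in
 * skb->cb, and (re)initialize the RX buffer descriptor at @idx so the
 * hardware can fill it.
 */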
211 static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
212                                  struct rtw_pci_rx_ring *rx_ring,
213                                  u32 idx, u32 desc_sz)
214 {
215         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
216         struct rtw_pci_rx_buffer_desc *buf_desc;
217         int buf_sz = RTK_PCI_RX_BUF_SIZE;
218         dma_addr_t dma;
219
220         if (!skb)
221                 return -EINVAL;
222
223         dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
224         if (pci_dma_mapping_error(pdev, dma))
225                 return -EBUSY;
226
227         *((dma_addr_t *)skb->cb) = dma;
228         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
229                                                      idx * desc_sz);
230         memset(buf_desc, 0, sizeof(*buf_desc));
231         buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
232         buf_desc->dma = cpu_to_le32(dma);
233
234         return 0;
235 }
236
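/* Hand an already-mapped RX buffer back to the device: sync it for device
 * access and rewrite its buffer descriptor.
 */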
237 static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
238                                         struct rtw_pci_rx_ring *rx_ring,
239                                         u32 idx, u32 desc_sz)
240 {
241         struct device *dev = rtwdev->dev;
242         struct rtw_pci_rx_buffer_desc *buf_desc;
243         int buf_sz = RTK_PCI_RX_BUF_SIZE;
244
245         dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
246
247         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
248                                                      idx * desc_sz);
249         memset(buf_desc, 0, sizeof(*buf_desc));
250         buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
251         buf_desc->dma = cpu_to_le32(dma);
252 }
253
254 static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
255                                 struct rtw_pci_rx_ring *rx_ring,
256                                 u8 desc_size, u32 len)
257 {
258         struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
259         struct sk_buff *skb = NULL;
260         dma_addr_t dma;
261         u8 *head;
262         int ring_sz = desc_size * len;
263         int buf_sz = RTK_PCI_RX_BUF_SIZE;
264         int i, allocated;
265         int ret = 0;
266
267         if (len > TRX_BD_IDX_MASK) {
268                 rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
269                 return -EINVAL;
270         }
271
272         head = pci_zalloc_consistent(pdev, ring_sz, &dma);
273         if (!head) {
274                 rtw_err(rtwdev, "failed to allocate rx ring\n");
275                 return -ENOMEM;
276         }
277         rx_ring->r.head = head;
278
279         for (i = 0; i < len; i++) {
280                 skb = dev_alloc_skb(buf_sz);
281                 if (!skb) {
282                         allocated = i;
283                         ret = -ENOMEM;
284                         goto err_out;
285                 }
286
287                 memset(skb->data, 0, buf_sz);
288                 rx_ring->buf[i] = skb;
289                 ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
290                 if (ret) {
291                         allocated = i;
292                         dev_kfree_skb_any(skb);
293                         goto err_out;
294                 }
295         }
296
297         rx_ring->r.dma = dma;
298         rx_ring->r.len = len;
299         rx_ring->r.desc_size = desc_size;
300         rx_ring->r.wp = 0;
301         rx_ring->r.rp = 0;
302
303         return 0;
304
305 err_out:
306         for (i = 0; i < allocated; i++) {
307                 skb = rx_ring->buf[i];
308                 if (!skb)
309                         continue;
310                 dma = *((dma_addr_t *)skb->cb);
311                 pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
312                 dev_kfree_skb_any(skb);
313                 rx_ring->buf[i] = NULL;
314         }
315         pci_free_consistent(pdev, ring_sz, head, dma);
316
317         rtw_err(rtwdev, "failed to init rx buffer\n");
318
319         return ret;
320 }
321
322 static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
323 {
324         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
325         struct rtw_pci_tx_ring *tx_ring;
326         struct rtw_pci_rx_ring *rx_ring;
327         struct rtw_chip_info *chip = rtwdev->chip;
328         int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
329         int tx_desc_size, rx_desc_size;
330         u32 len;
331         int ret;
332
333         tx_desc_size = chip->tx_buf_desc_sz;
334
335         for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
336                 tx_ring = &rtwpci->tx_rings[i];
337                 len = max_num_of_tx_queue(i);
338                 ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
339                 if (ret)
340                         goto out;
341         }
342
343         rx_desc_size = chip->rx_buf_desc_sz;
344
345         for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
346                 rx_ring = &rtwpci->rx_rings[j];
347                 ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
348                                            RTK_MAX_RX_DESC_NUM);
349                 if (ret)
350                         goto out;
351         }
352
353         return 0;
354
355 out:
356         tx_alloced = i;
357         for (i = 0; i < tx_alloced; i++) {
358                 tx_ring = &rtwpci->tx_rings[i];
359                 rtw_pci_free_tx_ring(rtwdev, tx_ring);
360         }
361
362         rx_alloced = j;
363         for (j = 0; j < rx_alloced; j++) {
364                 rx_ring = &rtwpci->rx_rings[j];
365                 rtw_pci_free_rx_ring(rtwdev, rx_ring);
366         }
367
368         return ret;
369 }
370
371 static void rtw_pci_deinit(struct rtw_dev *rtwdev)
372 {
373         rtw_pci_free_trx_ring(rtwdev);
374 }
375
376 static int rtw_pci_init(struct rtw_dev *rtwdev)
377 {
378         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
379         int ret = 0;
380
381         rtwpci->irq_mask[0] = IMR_HIGHDOK |
382                               IMR_MGNTDOK |
383                               IMR_BKDOK |
384                               IMR_BEDOK |
385                               IMR_VIDOK |
386                               IMR_VODOK |
387                               IMR_ROK |
388                               IMR_BCNDMAINT_E |
389                               0;
390         rtwpci->irq_mask[1] = IMR_TXFOVW |
391                               0;
392         rtwpci->irq_mask[3] = IMR_H2CDOK |
393                               0;
394         spin_lock_init(&rtwpci->irq_lock);
395         spin_lock_init(&rtwpci->hwirq_lock);
396         ret = rtw_pci_init_trx_ring(rtwdev);
397
398         return ret;
399 }
400
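/* Program the DMA base address and ring size of every TX/RX ring into the
 * corresponding hardware registers and reset the software read/write
 * pointers.
 */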
401 static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
402 {
403         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
404         u32 len;
405         u8 tmp;
406         dma_addr_t dma;
407
408         tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
409         rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
410
411         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
412         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
413
414         len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
415         dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
416         rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
417         rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
418         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
419         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
420
421         len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
422         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
423         rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
424         rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
425         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
426         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
427
428         len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
429         dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
430         rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
431         rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
432         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
433         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
434
435         len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
436         dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
437         rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
438         rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
439         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
440         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
441
442         len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
443         dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
444         rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
445         rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
446         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
447         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
448
449         len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
450         dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
451         rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
452         rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
453         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
454         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
455
456         len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
457         dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
458         rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
459         rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
460         rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
461         rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
462
463         len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
464         dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
465         rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
466         rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
467         rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
468         rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
469
470         /* reset read/write pointers */
471         rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
472
473         /* reset H2C Queue index in a single write */
474         rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
475                         BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
476 }
477
478 static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
479 {
480         rtw_pci_reset_buf_desc(rtwdev);
481 }
482
483 static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
484                                      struct rtw_pci *rtwpci)
485 {
486         unsigned long flags;
487
488         spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
489
490         rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
491         rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
492         rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
493         rtwpci->irq_enabled = true;
494
495         spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
496 }
497
498 static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
499                                       struct rtw_pci *rtwpci)
500 {
501         unsigned long flags;
502
503         spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
504
505         if (!rtwpci->irq_enabled)
506                 goto out;
507
508         rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
509         rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
510         rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
511         rtwpci->irq_enabled = false;
512
513 out:
514         spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
515 }
516
517 static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
518 {
519         /* reset dma and rx tag */
520         rtw_write32_set(rtwdev, RTK_PCI_CTRL,
521                         BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
522         rtwpci->rx_tag = 0;
523 }
524
525 static int rtw_pci_setup(struct rtw_dev *rtwdev)
526 {
527         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
528
529         rtw_pci_reset_trx_ring(rtwdev);
530         rtw_pci_dma_reset(rtwdev, rtwpci);
531
532         return 0;
533 }
534
535 static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
536 {
537         struct rtw_pci_tx_ring *tx_ring;
538         u8 queue;
539
540         rtw_pci_reset_trx_ring(rtwdev);
541         for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
542                 tx_ring = &rtwpci->tx_rings[queue];
543                 rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
544         }
545 }
546
547 static int rtw_pci_start(struct rtw_dev *rtwdev)
548 {
549         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
550
551         spin_lock_bh(&rtwpci->irq_lock);
552         rtw_pci_enable_interrupt(rtwdev, rtwpci);
553         spin_unlock_bh(&rtwpci->irq_lock);
554
555         return 0;
556 }
557
558 static void rtw_pci_stop(struct rtw_dev *rtwdev)
559 {
560         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
561
562         spin_lock_bh(&rtwpci->irq_lock);
563         rtw_pci_disable_interrupt(rtwdev, rtwpci);
564         rtw_pci_dma_release(rtwdev, rtwpci);
565         spin_unlock_bh(&rtwpci->irq_lock);
566 }
567
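/* Enter deep power save only when all TX rings (except BCN/H2C) are idle,
 * since TX DMA must not run in that state.
 */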
568 static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
569 {
570         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
571         struct rtw_pci_tx_ring *tx_ring;
572         bool tx_empty = true;
573         u8 queue;
574
575         lockdep_assert_held(&rtwpci->irq_lock);
576
577         /* TX DMA is not allowed while in deep PS state */
578         for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
579                 /* the BCN queue holds reserved pages and has no DMA interrupt;
580                  * the H2C queue is managed by the firmware
581                  */
582                 if (queue == RTW_TX_QUEUE_BCN ||
583                     queue == RTW_TX_QUEUE_H2C)
584                         continue;
585
586                 tx_ring = &rtwpci->tx_rings[queue];
587
588                 /* check if any skb is still being DMA'd */
589                 if (skb_queue_len(&tx_ring->queue)) {
590                         tx_empty = false;
591                         break;
592                 }
593         }
594
595         if (!tx_empty) {
596                 rtw_dbg(rtwdev, RTW_DBG_PS,
597                         "TX path not empty, cannot enter deep power save state\n");
598                 return;
599         }
600
601         set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
602         rtw_power_mode_change(rtwdev, true);
603 }
604
605 static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
606 {
607         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
608
609         lockdep_assert_held(&rtwpci->irq_lock);
610
611         if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
612                 rtw_power_mode_change(rtwdev, false);
613 }
614
615 static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
616 {
617         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
618
619         spin_lock_bh(&rtwpci->irq_lock);
620
621         if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
622                 rtw_pci_deep_ps_enter(rtwdev);
623
624         if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
625                 rtw_pci_deep_ps_leave(rtwdev);
626
627         spin_unlock_bh(&rtwpci->irq_lock);
628 }
629
630 static u8 ac_to_hwq[] = {
631         [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
632         [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
633         [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
634         [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
635 };
636
637 static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
638 {
639         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
640         __le16 fc = hdr->frame_control;
641         u8 q_mapping = skb_get_queue_mapping(skb);
642         u8 queue;
643
644         if (unlikely(ieee80211_is_beacon(fc)))
645                 queue = RTW_TX_QUEUE_BCN;
646         else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
647                 queue = RTW_TX_QUEUE_MGMT;
648         else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
649                 queue = ac_to_hwq[IEEE80211_AC_BE];
650         else
651                 queue = ac_to_hwq[q_mapping];
652
653         return queue;
654 }
655
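/* The beacon queue holds at most one reserved-page skb; unmap and free the
 * previous one before a new one is queued.
 */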
656 static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
657                                       struct rtw_pci_tx_ring *ring)
658 {
659         struct sk_buff *prev = skb_dequeue(&ring->queue);
660         struct rtw_pci_tx_data *tx_data;
661         dma_addr_t dma;
662
663         if (!prev)
664                 return;
665
666         tx_data = rtw_pci_get_tx_data(prev);
667         dma = tx_data->dma;
668         pci_unmap_single(rtwpci->pdev, dma, prev->len,
669                          PCI_DMA_TODEVICE);
670         dev_kfree_skb_any(prev);
671 }
672
673 static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
674                               struct rtw_pci_rx_ring *rx_ring,
675                               u32 idx)
676 {
677         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
678         struct rtw_chip_info *chip = rtwdev->chip;
679         struct rtw_pci_rx_buffer_desc *buf_desc;
680         u32 desc_sz = chip->rx_buf_desc_sz;
681         u16 total_pkt_size;
682
683         buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
684                                                      idx * desc_sz);
685         total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
686
687         /* rx tag mismatch, throw a warning */
688         if (total_pkt_size != rtwpci->rx_tag)
689                 rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
690
691         rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
692 }
693
694 static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
695 {
696         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
697         struct rtw_pci_tx_ring *ring;
698         u32 bd_idx;
699
700         ring = &rtwpci->tx_rings[queue];
701         bd_idx = rtw_pci_tx_queue_idx_addr[queue];
702
703         spin_lock_bh(&rtwpci->irq_lock);
704         rtw_pci_deep_ps_leave(rtwdev);
705         rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
706         spin_unlock_bh(&rtwpci->irq_lock);
707 }
708
709 static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
710 {
711         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
712         u8 queue;
713
714         for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
715                 if (test_and_clear_bit(queue, rtwpci->tx_queued))
716                         rtw_pci_tx_kick_off_queue(rtwdev, queue);
717 }
718
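/* Build the TX packet descriptor in front of the skb, DMA-map the whole
 * buffer and fill a two-segment buffer descriptor (packet descriptor +
 * payload), then queue the skb and, except for the beacon queue, advance
 * the write pointer.
 */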
719 static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
720                                  struct rtw_tx_pkt_info *pkt_info,
721                                  struct sk_buff *skb, u8 queue)
722 {
723         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
724         struct rtw_chip_info *chip = rtwdev->chip;
725         struct rtw_pci_tx_ring *ring;
726         struct rtw_pci_tx_data *tx_data;
727         dma_addr_t dma;
728         u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
729         u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
730         u32 size;
731         u32 psb_len;
732         u8 *pkt_desc;
733         struct rtw_pci_tx_buffer_desc *buf_desc;
734
735         ring = &rtwpci->tx_rings[queue];
736
737         size = skb->len;
738
739         if (queue == RTW_TX_QUEUE_BCN)
740                 rtw_pci_release_rsvd_page(rtwpci, ring);
741         else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
742                 return -ENOSPC;
743
744         pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
745         memset(pkt_desc, 0, tx_pkt_desc_sz);
746         pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
747         rtw_tx_fill_tx_desc(pkt_info, skb);
748         dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
749                              PCI_DMA_TODEVICE);
750         if (pci_dma_mapping_error(rtwpci->pdev, dma))
751                 return -EBUSY;
752
753         /* after this point the skb is DMA mapped, there is no way back */
754         buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
755         memset(buf_desc, 0, tx_buf_desc_sz);
756         psb_len = (skb->len - 1) / 128 + 1;
757         if (queue == RTW_TX_QUEUE_BCN)
758                 psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
759
760         buf_desc[0].psb_len = cpu_to_le16(psb_len);
761         buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
762         buf_desc[0].dma = cpu_to_le32(dma);
763         buf_desc[1].buf_size = cpu_to_le16(size);
764         buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
765
766         tx_data = rtw_pci_get_tx_data(skb);
767         tx_data->dma = dma;
768         tx_data->sn = pkt_info->sn;
769
770         spin_lock_bh(&rtwpci->irq_lock);
771
772         skb_queue_tail(&ring->queue, skb);
773
774         if (queue == RTW_TX_QUEUE_BCN)
775                 goto out_unlock;
776
777         /* update write-index, and kick it off later */
778         set_bit(queue, rtwpci->tx_queued);
779         if (++ring->r.wp >= ring->r.len)
780                 ring->r.wp = 0;
781
782 out_unlock:
783         spin_unlock_bh(&rtwpci->irq_lock);
784
785         return 0;
786 }
787
788 static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
789                                         u32 size)
790 {
791         struct sk_buff *skb;
792         struct rtw_tx_pkt_info pkt_info = {0};
793         u8 reg_bcn_work;
794         int ret;
795
796         skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
797         if (!skb)
798                 return -ENOMEM;
799
800         ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
801         if (ret) {
802                 rtw_err(rtwdev, "failed to write rsvd page data\n");
803                 return ret;
804         }
805
806         /* reserved pages go through beacon queue */
807         reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
808         reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
809         rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
810
811         return 0;
812 }
813
814 static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
815 {
816         struct sk_buff *skb;
817         struct rtw_tx_pkt_info pkt_info = {0};
818         int ret;
819
820         skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
821         if (!skb)
822                 return -ENOMEM;
823
824         ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
825         if (ret) {
826                 rtw_err(rtwdev, "failed to write h2c data\n");
827                 return ret;
828         }
829
830         rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
831
832         return 0;
833 }
834
835 static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
836                             struct rtw_tx_pkt_info *pkt_info,
837                             struct sk_buff *skb)
838 {
839         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
840         struct rtw_pci_tx_ring *ring;
841         u8 queue = rtw_hw_queue_mapping(skb);
842         int ret;
843
844         ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
845         if (ret)
846                 return ret;
847
848         ring = &rtwpci->tx_rings[queue];
849         if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
850                 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
851                 ring->queue_stopped = true;
852         }
853
854         return 0;
855 }
856
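/* TX completion: read the hardware read pointer of @hw_queue, then unmap and
 * complete every finished skb (H2C packets are simply freed, data frames are
 * reported to mac80211, and a stopped queue is woken once enough descriptors
 * are available again).
 */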
857 static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
858                            u8 hw_queue)
859 {
860         struct ieee80211_hw *hw = rtwdev->hw;
861         struct ieee80211_tx_info *info;
862         struct rtw_pci_tx_ring *ring;
863         struct rtw_pci_tx_data *tx_data;
864         struct sk_buff *skb;
865         u32 count;
866         u32 bd_idx_addr;
867         u32 bd_idx, cur_rp;
868         u16 q_map;
869
870         ring = &rtwpci->tx_rings[hw_queue];
871
872         bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
873         bd_idx = rtw_read32(rtwdev, bd_idx_addr);
874         cur_rp = bd_idx >> 16;
875         cur_rp &= TRX_BD_IDX_MASK;
876         if (cur_rp >= ring->r.rp)
877                 count = cur_rp - ring->r.rp;
878         else
879                 count = ring->r.len - (ring->r.rp - cur_rp);
880
881         while (count--) {
882                 skb = skb_dequeue(&ring->queue);
883                 if (!skb) {
884                         rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
885                                 count, hw_queue, bd_idx, ring->r.rp, cur_rp);
886                         break;
887                 }
888                 tx_data = rtw_pci_get_tx_data(skb);
889                 pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
890                                  PCI_DMA_TODEVICE);
891
892                 /* just free command packets from host to card */
893                 if (hw_queue == RTW_TX_QUEUE_H2C) {
894                         dev_kfree_skb_irq(skb);
895                         continue;
896                 }
897
898                 if (ring->queue_stopped &&
899                     avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
900                         q_map = skb_get_queue_mapping(skb);
901                         ieee80211_wake_queue(hw, q_map);
902                         ring->queue_stopped = false;
903                 }
904
905                 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
906
907                 info = IEEE80211_SKB_CB(skb);
908
909                 /* enqueue to wait for tx report */
910                 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
911                         rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
912                         continue;
913                 }
914
915                 /* report ACK for all others so they are not marked as dropped */
916                 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
917                         info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
918                 else
919                         info->flags |= IEEE80211_TX_STAT_ACK;
920
921                 ieee80211_tx_info_clear_status(info);
922                 ieee80211_tx_status_irqsafe(hw, skb);
923         }
924
925         ring->r.rp = cur_rp;
926 }
927
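/* RX completion: walk the descriptors the hardware has filled, copy each
 * frame into a newly allocated skb (C2H frames go to the firmware handler,
 * everything else to mac80211), then re-arm the original buffer for the
 * device.
 */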
928 static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
929                            u8 hw_queue)
930 {
931         struct rtw_chip_info *chip = rtwdev->chip;
932         struct rtw_pci_rx_ring *ring;
933         struct rtw_rx_pkt_stat pkt_stat;
934         struct ieee80211_rx_status rx_status;
935         struct sk_buff *skb, *new;
936         u32 cur_wp, cur_rp, tmp;
937         u32 count;
938         u32 pkt_offset;
939         u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
940         u32 buf_desc_sz = chip->rx_buf_desc_sz;
941         u32 new_len;
942         u8 *rx_desc;
943         dma_addr_t dma;
944
945         ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
946
947         tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
948         cur_wp = tmp >> 16;
949         cur_wp &= TRX_BD_IDX_MASK;
950         if (cur_wp >= ring->r.wp)
951                 count = cur_wp - ring->r.wp;
952         else
953                 count = ring->r.len - (ring->r.wp - cur_wp);
954
955         cur_rp = ring->r.rp;
956         while (count--) {
957                 rtw_pci_dma_check(rtwdev, ring, cur_rp);
958                 skb = ring->buf[cur_rp];
959                 dma = *((dma_addr_t *)skb->cb);
960                 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
961                                         DMA_FROM_DEVICE);
962                 rx_desc = skb->data;
963                 chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
964
965                 /* offset from rx_desc to payload */
966                 pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
967                              pkt_stat.shift;
968
969                 /* allocate a new skb for this frame,
970                  * discard the frame if none available
971                  */
972                 new_len = pkt_stat.pkt_len + pkt_offset;
973                 new = dev_alloc_skb(new_len);
974                 if (WARN_ONCE(!new, "rx routine starvation\n"))
975                         goto next_rp;
976
977                 /* copy the DMA'd data, including rx_desc, from the PHY into the new skb */
978                 skb_put_data(new, skb->data, new_len);
979
980                 if (pkt_stat.is_c2h) {
981                         rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
982                 } else {
983                         /* remove rx_desc */
984                         skb_pull(new, pkt_offset);
985
986                         rtw_rx_stats(rtwdev, pkt_stat.vif, new);
987                         memcpy(new->cb, &rx_status, sizeof(rx_status));
988                         ieee80211_rx_irqsafe(rtwdev->hw, new);
989                 }
990
991 next_rp:
992                 /* new skb delivered to mac80211, re-enable original skb DMA */
993                 rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
994                                             buf_desc_sz);
995
996                 /* host reads the next element in the ring */
997                 if (++cur_rp >= ring->r.len)
998                         cur_rp = 0;
999         }
1000
1001         ring->r.rp = cur_rp;
1002         ring->r.wp = cur_wp;
1003         rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1004 }
1005
1006 static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1007                                    struct rtw_pci *rtwpci, u32 *irq_status)
1008 {
1009         unsigned long flags;
1010
1011         spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1012
1013         irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1014         irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1015         irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1016         irq_status[0] &= rtwpci->irq_mask[0];
1017         irq_status[1] &= rtwpci->irq_mask[1];
1018         irq_status[3] &= rtwpci->irq_mask[3];
1019         rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1020         rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1021         rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1022
1023         spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1024 }
1025
1026 static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1027 {
1028         struct rtw_dev *rtwdev = dev;
1029         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1030
1031         /* disable RTW PCI interrupts to avoid more interrupts arriving before
1032          * the threaded handler has finished
1033          *
1034          * disable HIMR here to also avoid a new HISR flag being raised before
1035          * the HISRs have been write-1-cleared for MSI. If not all of the HISRs
1036          * are cleared, the edge-triggered interrupt will not be generated when
1037          * a new HISR flag is set.
1038          */
1039         rtw_pci_disable_interrupt(rtwdev, rtwpci);
1040
1041         return IRQ_WAKE_THREAD;
1042 }
1043
1044 static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1045 {
1046         struct rtw_dev *rtwdev = dev;
1047         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1048         u32 irq_status[4];
1049
1050         spin_lock_bh(&rtwpci->irq_lock);
1051         rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1052
1053         if (irq_status[0] & IMR_MGNTDOK)
1054                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1055         if (irq_status[0] & IMR_HIGHDOK)
1056                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1057         if (irq_status[0] & IMR_BEDOK)
1058                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1059         if (irq_status[0] & IMR_BKDOK)
1060                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1061         if (irq_status[0] & IMR_VODOK)
1062                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1063         if (irq_status[0] & IMR_VIDOK)
1064                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1065         if (irq_status[3] & IMR_H2CDOK)
1066                 rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1067         if (irq_status[0] & IMR_ROK)
1068                 rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
1069
1070         /* all of the work for this interrupt has been done */
1071         rtw_pci_enable_interrupt(rtwdev, rtwpci);
1072         spin_unlock_bh(&rtwpci->irq_lock);
1073
1074         return IRQ_HANDLED;
1075 }
1076
1077 static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1078                               struct pci_dev *pdev)
1079 {
1080         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1081         unsigned long len;
1082         u8 bar_id = 2;
1083         int ret;
1084
1085         ret = pci_request_regions(pdev, KBUILD_MODNAME);
1086         if (ret) {
1087                 rtw_err(rtwdev, "failed to request pci regions\n");
1088                 return ret;
1089         }
1090
1091         len = pci_resource_len(pdev, bar_id);
1092         rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1093         if (!rtwpci->mmap) {
1094                 rtw_err(rtwdev, "failed to map pci memory\n");
1095                 return -ENOMEM;
1096         }
1097
1098         return 0;
1099 }
1100
1101 static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1102                                  struct pci_dev *pdev)
1103 {
1104         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1105
1106         if (rtwpci->mmap) {
1107                 pci_iounmap(pdev, rtwpci->mmap);
1108                 pci_release_regions(pdev);
1109         }
1110 }
1111
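/* Write one byte of the chip's PCIe configuration (DBI) space through the
 * indirect DBI registers, polling the write flag until the access completes.
 */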
1112 static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1113 {
1114         u16 write_addr;
1115         u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1116         u8 flag;
1117         u8 cnt;
1118
1119         write_addr = addr & BITS_DBI_ADDR_MASK;
1120         write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1121         rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1122         rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1123         rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1124
1125         for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1126                 flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1127                 if (flag == 0)
1128                         return;
1129
1130                 udelay(10);
1131         }
1132
1133         WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1134 }
1135
1136 static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1137 {
1138         u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1139         u8 flag;
1140         u8 cnt;
1141
1142         rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1143         rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1144
1145         for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1146                 flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1147                 if (flag == 0) {
1148                         read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1149                         *value = rtw_read8(rtwdev, read_addr);
1150                         return 0;
1151                 }
1152
1153                 udelay(10);
1154         }
1155
1156         WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1157         return -EIO;
1158 }
1159
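/* Write a PCIe PHY register through the chip's MDIO interface; @g1 selects
 * the Gen1 or Gen2 parameter page. The write flag is polled for completion.
 */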
1160 static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1161 {
1162         u8 page;
1163         u8 wflag;
1164         u8 cnt;
1165
1166         rtw_write16(rtwdev, REG_MDIO_V1, data);
1167
1168         page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1169         page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1170         rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1171         rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1172         rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1173
1174         for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1175                 wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1176                                         BIT_MDIO_WFLAG_V1);
1177                 if (wflag == 0)
1178                         return;
1179
1180                 udelay(10);
1181         }
1182
1183         WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1184 }
1185
1186 static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1187 {
1188         u8 value;
1189         int ret;
1190
1191         ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1192         if (ret) {
1193                 rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1194                 return;
1195         }
1196
1197         if (enable)
1198                 value |= BIT_CLKREQ_SW_EN;
1199         else
1200                 value &= ~BIT_CLKREQ_SW_EN;
1201
1202         rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1203 }
1204
1205 static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1206 {
1207         u8 value;
1208         int ret;
1209
1210         ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1211         if (ret) {
1212                 rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1213                 return;
1214         }
1215
1216         if (enable)
1217                 value |= BIT_L1_SW_EN;
1218         else
1219                 value &= ~BIT_L1_SW_EN;
1220
1221         rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1222 }
1223
1224 static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1225 {
1226         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1227
1228         /* Like CLKREQ, ASPM is also implemented by two HW modules and can
1229          * only be enabled when the host supports it.
1230          *
1231          * The ASPM mechanism should only be enabled when the driver/firmware
1232          * enters power save mode, i.e. when there is no heavy traffic, because
1233          * we have seen interoperability issues where the link tends to enter
1234          * the L1 state on the fly even while the driver is handling high
1235          * throughput. This is probably because the ASPM behavior varies
1236          * slightly between SoCs.
1237          */
1238         if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
1239                 rtw_pci_aspm_set(rtwdev, enter);
1240 }
1241
1242 static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1243 {
1244         struct rtw_chip_info *chip = rtwdev->chip;
1245         struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1246         struct pci_dev *pdev = rtwpci->pdev;
1247         u16 link_ctrl;
1248         int ret;
1249
1250         /* RTL8822CE has REFCLK auto calibration enabled, so it does not need
1251          * an added clock delay to cover the REFCLK timing gap.
1252          */
1253         if (chip->id == RTW_CHIP_TYPE_8822C)
1254                 rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1255
1256         /* Although the link control register can be set through the standard
1257          * PCIE configuration space, by Realtek's design the driver should
1258          * also check whether the host supports CLKREQ/ASPM before enabling
1259          * the HW module.
1260          *
1261          * These functions are implemented by two associated HW modules: one
1262          * accesses the PCIE configuration space to follow the host settings,
1263          * and the other performs the CLKREQ/ASPM mechanisms and is disabled
1264          * by default, because sometimes the host does not support them, and
1265          * wrong settings (ex. CLKREQ# not bi-directional) could lead to
1266          * device loss if the HW misbehaves on the link.
1267          *
1268          * Hence the driver should first check that the PCIE configuration
1269          * space is synced and enabled, and only then turn on the other module
1270          * that actually implements the mechanism.
1271          */
1272         ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1273         if (ret) {
1274                 rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1275                 return;
1276         }
1277
1278         if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1279                 rtw_pci_clkreq_set(rtwdev, true);
1280
1281         rtwpci->link_ctrl = link_ctrl;
1282 }
1283
1284 static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1285 {
1286         struct rtw_chip_info *chip = rtwdev->chip;
1287
1288         switch (chip->id) {
1289         case RTW_CHIP_TYPE_8822C:
1290                 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1291                         rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1292                                          BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1293                 break;
1294         default:
1295                 break;
1296         }
1297 }
1298
1299 static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1300 {
1301         struct rtw_chip_info *chip = rtwdev->chip;
1302         const struct rtw_intf_phy_para *para;
1303         u16 cut;
1304         u16 value;
1305         u16 offset;
1306         int i;
1307
1308         cut = BIT(0) << rtwdev->hal.cut_version;
1309
1310         for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1311                 para = &chip->intf_table->gen1_para[i];
1312                 if (!(para->cut_mask & cut))
1313                         continue;
1314                 if (para->offset == 0xffff)
1315                         break;
1316                 offset = para->offset;
1317                 value = para->value;
1318                 if (para->ip_sel == RTW_IP_SEL_PHY)
1319                         rtw_mdio_write(rtwdev, offset, value, true);
1320                 else
1321                         rtw_dbi_write8(rtwdev, offset, value);
1322         }
1323
1324         for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1325                 para = &chip->intf_table->gen2_para[i];
1326                 if (!(para->cut_mask & cut))
1327                         continue;
1328                 if (para->offset == 0xffff)
1329                         break;
1330                 offset = para->offset;
1331                 value = para->value;
1332                 if (para->ip_sel == RTW_IP_SEL_PHY)
1333                         rtw_mdio_write(rtwdev, offset, value, false);
1334                 else
1335                         rtw_dbi_write8(rtwdev, offset, value);
1336         }
1337
1338         rtw_pci_link_cfg(rtwdev);
1339 }
1340
1341 static int __maybe_unused rtw_pci_suspend(struct device *dev)
1342 {
1343         return 0;
1344 }
1345
1346 static int __maybe_unused rtw_pci_resume(struct device *dev)
1347 {
1348         return 0;
1349 }
1350
1351 static SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1352
1353 static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1354 {
1355         int ret;
1356
1357         ret = pci_enable_device(pdev);
1358         if (ret) {
1359                 rtw_err(rtwdev, "failed to enable pci device\n");
1360                 return ret;
1361         }
1362
1363         pci_set_master(pdev);
1364         pci_set_drvdata(pdev, rtwdev->hw);
1365         SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1366
1367         return 0;
1368 }
1369
1370 static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1371 {
1372         pci_clear_master(pdev);
1373         pci_disable_device(pdev);
1374 }
1375
1376 static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1377 {
1378         struct rtw_pci *rtwpci;
1379         int ret;
1380
1381         rtwpci = (struct rtw_pci *)rtwdev->priv;
1382         rtwpci->pdev = pdev;
1383
1384         /* after this the driver can access hw registers */
1385         ret = rtw_pci_io_mapping(rtwdev, pdev);
1386         if (ret) {
1387                 rtw_err(rtwdev, "failed to request pci io region\n");
1388                 goto err_out;
1389         }
1390
1391         ret = rtw_pci_init(rtwdev);
1392         if (ret) {
1393                 rtw_err(rtwdev, "failed to allocate pci resources\n");
1394                 goto err_io_unmap;
1395         }
1396
1397         return 0;
1398
1399 err_io_unmap:
1400         rtw_pci_io_unmapping(rtwdev, pdev);
1401
1402 err_out:
1403         return ret;
1404 }
1405
1406 static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1407 {
1408         rtw_pci_deinit(rtwdev);
1409         rtw_pci_io_unmapping(rtwdev, pdev);
1410 }
1411
1412 static struct rtw_hci_ops rtw_pci_ops = {
1413         .tx_write = rtw_pci_tx_write,
1414         .tx_kick_off = rtw_pci_tx_kick_off,
1415         .setup = rtw_pci_setup,
1416         .start = rtw_pci_start,
1417         .stop = rtw_pci_stop,
1418         .deep_ps = rtw_pci_deep_ps,
1419         .link_ps = rtw_pci_link_ps,
1420         .interface_cfg = rtw_pci_interface_cfg,
1421
1422         .read8 = rtw_pci_read8,
1423         .read16 = rtw_pci_read16,
1424         .read32 = rtw_pci_read32,
1425         .write8 = rtw_pci_write8,
1426         .write16 = rtw_pci_write16,
1427         .write32 = rtw_pci_write32,
1428         .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1429         .write_data_h2c = rtw_pci_write_data_h2c,
1430 };
1431
1432 static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1433 {
1434         unsigned int flags = PCI_IRQ_LEGACY;
1435         int ret;
1436
1437         if (!rtw_disable_msi)
1438                 flags |= PCI_IRQ_MSI;
1439
1440         ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1441         if (ret < 0) {
1442                 rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1443                 return ret;
1444         }
1445
1446         ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1447                                         rtw_pci_interrupt_handler,
1448                                         rtw_pci_interrupt_threadfn,
1449                                         IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1450         if (ret) {
1451                 rtw_err(rtwdev, "failed to request irq %d\n", ret);
1452                 pci_free_irq_vectors(pdev);
1453         }
1454
1455         return ret;
1456 }
1457
1458 static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1459 {
1460         devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1461         pci_free_irq_vectors(pdev);
1462 }
1463
1464 static int rtw_pci_probe(struct pci_dev *pdev,
1465                          const struct pci_device_id *id)
1466 {
1467         struct ieee80211_hw *hw;
1468         struct rtw_dev *rtwdev;
1469         int drv_data_size;
1470         int ret;
1471
1472         drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1473         hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1474         if (!hw) {
1475                 dev_err(&pdev->dev, "failed to allocate hw\n");
1476                 return -ENOMEM;
1477         }
1478
1479         rtwdev = hw->priv;
1480         rtwdev->hw = hw;
1481         rtwdev->dev = &pdev->dev;
1482         rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1483         rtwdev->hci.ops = &rtw_pci_ops;
1484         rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1485
1486         ret = rtw_core_init(rtwdev);
1487         if (ret)
1488                 goto err_release_hw;
1489
1490         rtw_dbg(rtwdev, RTW_DBG_PCI,
1491                 "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1492                 pdev->vendor, pdev->device, pdev->revision);
1493
1494         ret = rtw_pci_claim(rtwdev, pdev);
1495         if (ret) {
1496                 rtw_err(rtwdev, "failed to claim pci device\n");
1497                 goto err_deinit_core;
1498         }
1499
1500         ret = rtw_pci_setup_resource(rtwdev, pdev);
1501         if (ret) {
1502                 rtw_err(rtwdev, "failed to setup pci resources\n");
1503                 goto err_pci_declaim;
1504         }
1505
1506         ret = rtw_chip_info_setup(rtwdev);
1507         if (ret) {
1508                 rtw_err(rtwdev, "failed to setup chip information\n");
1509                 goto err_destroy_pci;
1510         }
1511
1512         rtw_pci_phy_cfg(rtwdev);
1513
1514         ret = rtw_register_hw(rtwdev, hw);
1515         if (ret) {
1516                 rtw_err(rtwdev, "failed to register hw\n");
1517                 goto err_destroy_pci;
1518         }
1519
1520         ret = rtw_pci_request_irq(rtwdev, pdev);
1521         if (ret) {
1522                 ieee80211_unregister_hw(hw);
1523                 goto err_destroy_pci;
1524         }
1525
1526         return 0;
1527
1528 err_destroy_pci:
1529         rtw_pci_destroy(rtwdev, pdev);
1530
1531 err_pci_declaim:
1532         rtw_pci_declaim(rtwdev, pdev);
1533
1534 err_deinit_core:
1535         rtw_core_deinit(rtwdev);
1536
1537 err_release_hw:
1538         ieee80211_free_hw(hw);
1539
1540         return ret;
1541 }
1542
1543 static void rtw_pci_remove(struct pci_dev *pdev)
1544 {
1545         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1546         struct rtw_dev *rtwdev;
1547         struct rtw_pci *rtwpci;
1548
1549         if (!hw)
1550                 return;
1551
1552         rtwdev = hw->priv;
1553         rtwpci = (struct rtw_pci *)rtwdev->priv;
1554
1555         rtw_unregister_hw(rtwdev, hw);
1556         rtw_pci_disable_interrupt(rtwdev, rtwpci);
1557         rtw_pci_destroy(rtwdev, pdev);
1558         rtw_pci_declaim(rtwdev, pdev);
1559         rtw_pci_free_irq(rtwdev, pdev);
1560         rtw_core_deinit(rtwdev);
1561         ieee80211_free_hw(hw);
1562 }
1563
1564 static const struct pci_device_id rtw_pci_id_table[] = {
1565 #ifdef CONFIG_RTW88_8822BE
1566         { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
1567 #endif
1568 #ifdef CONFIG_RTW88_8822CE
1569         { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
1570 #endif
1571         {},
1572 };
1573 MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
1574
1575 static struct pci_driver rtw_pci_driver = {
1576         .name = "rtw_pci",
1577         .id_table = rtw_pci_id_table,
1578         .probe = rtw_pci_probe,
1579         .remove = rtw_pci_remove,
1580         .driver.pm = &rtw_pm_ops,
1581 };
1582 module_pci_driver(rtw_pci_driver);
1583
1584 MODULE_AUTHOR("Realtek Corporation");
1585 MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
1586 MODULE_LICENSE("Dual BSD/GPL");