net: jme: switch from 'pci_' to 'dma_' API
author: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Sun, 22 Aug 2021 06:48:40 +0000 (08:48 +0200)
committer: David S. Miller <davem@davemloft.net>
Mon, 23 Aug 2021 10:56:57 +0000 (11:56 +0100)
The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/jme.c

index 1251b74..438c560 100644 (file)
@@ -734,17 +734,17 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i)
        if (unlikely(!skb))
                return -ENOMEM;
 
-       mapping = pci_map_page(jme->pdev, virt_to_page(skb->data),
+       mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data),
                               offset_in_page(skb->data), skb_tailroom(skb),
-                              PCI_DMA_FROMDEVICE);
-       if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) {
+                              DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }
 
        if (likely(rxbi->mapping))
-               pci_unmap_page(jme->pdev, rxbi->mapping,
-                              rxbi->len, PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+                              DMA_FROM_DEVICE);
 
        rxbi->skb = skb;
        rxbi->len = skb_tailroom(skb);
@@ -760,10 +760,8 @@ jme_free_rx_buf(struct jme_adapter *jme, int i)
        rxbi += i;
 
        if (rxbi->skb) {
-               pci_unmap_page(jme->pdev,
-                                rxbi->mapping,
-                                rxbi->len,
-                                PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+                              DMA_FROM_DEVICE);
                dev_kfree_skb(rxbi->skb);
                rxbi->skb = NULL;
                rxbi->mapping = 0;
@@ -1005,16 +1003,12 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
        rxbi += idx;
 
        skb = rxbi->skb;
-       pci_dma_sync_single_for_cpu(jme->pdev,
-                                       rxbi->mapping,
-                                       rxbi->len,
-                                       PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len,
+                               DMA_FROM_DEVICE);
 
        if (unlikely(jme_make_new_rx_buf(jme, idx))) {
-               pci_dma_sync_single_for_device(jme->pdev,
-                                               rxbi->mapping,
-                                               rxbi->len,
-                                               PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping,
+                                          rxbi->len, DMA_FROM_DEVICE);
 
                ++(NET_STAT(jme).rx_dropped);
        } else {
@@ -1453,10 +1447,9 @@ static void jme_tx_clean_tasklet(struct tasklet_struct *t)
                                ttxbi = txbi + ((i + j) & (mask));
                                txdesc[(i + j) & (mask)].dw[0] = 0;
 
-                               pci_unmap_page(jme->pdev,
-                                                ttxbi->mapping,
-                                                ttxbi->len,
-                                                PCI_DMA_TODEVICE);
+                               dma_unmap_page(&jme->pdev->dev,
+                                              ttxbi->mapping, ttxbi->len,
+                                              DMA_TO_DEVICE);
 
                                ttxbi->mapping = 0;
                                ttxbi->len = 0;
@@ -1966,19 +1959,13 @@ jme_fill_tx_map(struct pci_dev *pdev,
 {
        dma_addr_t dmaaddr;
 
-       dmaaddr = pci_map_page(pdev,
-                               page,
-                               page_offset,
-                               len,
-                               PCI_DMA_TODEVICE);
+       dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len,
+                              DMA_TO_DEVICE);
 
-       if (unlikely(pci_dma_mapping_error(pdev, dmaaddr)))
+       if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr)))
                return -EINVAL;
 
-       pci_dma_sync_single_for_device(pdev,
-                                      dmaaddr,
-                                      len,
-                                      PCI_DMA_TODEVICE);
+       dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE);
 
        txdesc->dw[0] = 0;
        txdesc->dw[1] = 0;
@@ -2003,10 +1990,8 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
 
        for (j = 0 ; j < count ; j++) {
                ctxbi = txbi + ((startidx + j + 2) & (mask));
-               pci_unmap_page(jme->pdev,
-                               ctxbi->mapping,
-                               ctxbi->len,
-                               PCI_DMA_TODEVICE);
+               dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len,
+                              DMA_TO_DEVICE);
 
                ctxbi->mapping = 0;
                ctxbi->len = 0;
@@ -2859,18 +2844,15 @@ static int
 jme_pci_dma64(struct pci_dev *pdev)
 {
        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
-               if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
-                       return 1;
+           !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+               return 1;
 
        if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
-               if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
-                       return 1;
+           !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+               return 1;
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
-               if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
-                       return 0;
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+               return 0;
 
        return -1;
 }