struct sk_buff *skb = re->skb;
int i;
- re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, re->data_addr))
+ re->data_addr = dma_map_single(&pdev->dev, skb->data, size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, re->data_addr))
goto mapping_error;
dma_unmap_len_set(re, data_size, size);
map_page_error:
while (--i >= 0) {
- pci_unmap_page(pdev, re->frag_addr[i],
+ dma_unmap_page(&pdev->dev, re->frag_addr[i],
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
- pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, re->data_addr,
+ dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
mapping_error:
if (net_ratelimit())

struct sk_buff *skb = re->skb;
int i;
- pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, re->data_addr,
+ dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- pci_unmap_page(pdev, re->frag_addr[i],
+ dma_unmap_page(&pdev->dev, re->frag_addr[i],
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
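
The two hunks above are the stock streaming-DMA pattern of the generic API:
every dma_map_single()/dma_map_page() result must be checked with
dma_mapping_error() before the address reaches the hardware, and each
successful mapping is undone with the same size and direction it was created
with (note how the unwind path walks the fragments backwards with
"while (--i >= 0)" so only mappings that actually succeeded are unmapped).
A minimal sketch of the pattern; the example_* names, buf and len are
illustrative, not from the driver:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/pci.h>

    static int example_map_rx(struct pci_dev *pdev, void *buf, size_t len,
                              dma_addr_t *addr)
    {
            *addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(&pdev->dev, *addr))
                    return -EIO;    /* never hand an unchecked address to hw */
            return 0;
    }

    static void example_unmap_rx(struct pci_dev *pdev, dma_addr_t addr,
                                 size_t len)
    {
            /* size and direction must match the original dma_map_single() */
            dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);
    }
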
struct sky2_hw *hw = sky2->hw;
/* must be power of 2 */
- sky2->tx_le = pci_alloc_consistent(hw->pdev,
- sky2->tx_ring_size *
- sizeof(struct sky2_tx_le),
- &sky2->tx_le_map);
+ sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev,
+ sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+ &sky2->tx_le_map, GFP_KERNEL);
if (!sky2->tx_le)
goto nomem;

if (!sky2->tx_ring)
goto nomem;
- sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES,
- &sky2->rx_le_map);
+ sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES,
+ &sky2->rx_le_map, GFP_KERNEL);
if (!sky2->rx_le)
goto nomem;

sky2_rx_clean(sky2);
if (sky2->rx_le) {
- pci_free_consistent(hw->pdev, RX_LE_BYTES,
- sky2->rx_le, sky2->rx_le_map);
+ dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le,
+ sky2->rx_le_map);
sky2->rx_le = NULL;
}
if (sky2->tx_le) {
- pci_free_consistent(hw->pdev,
- sky2->tx_ring_size * sizeof(struct sky2_tx_le),
- sky2->tx_le, sky2->tx_le_map);
+ dma_free_coherent(&hw->pdev->dev,
+ sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+ sky2->tx_le, sky2->tx_le_map);
sky2->tx_le = NULL;
}
kfree(sky2->tx_ring);
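
Two details ride along with the allocation hunks above. The old
pci_alloc_consistent()/pci_zalloc_consistent() wrappers hard-coded GFP_ATOMIC,
so passing GFP_KERNEL here assumes (as the patch does) that these paths may
sleep. And dma_alloc_coherent() already returns zeroed memory, which is why
the zalloc variant needs no replacement memset(). A sketch of the alloc/free
pairing; ring_bytes and the example_* helpers are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *example_alloc_ring(struct pci_dev *pdev, size_t ring_bytes,
                                    dma_addr_t *ring_dma)
    {
            /* memory comes back zeroed; no memset() needed */
            return dma_alloc_coherent(&pdev->dev, ring_bytes, ring_dma,
                                      GFP_KERNEL);
    }

    static void example_free_ring(struct pci_dev *pdev, size_t ring_bytes,
                                  void *ring, dma_addr_t ring_dma)
    {
            /* size, CPU pointer and DMA handle must match the allocation */
            dma_free_coherent(&pdev->dev, ring_bytes, ring, ring_dma);
    }
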
static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
- pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
- dma_unmap_len(re, maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen), DMA_TO_DEVICE);
else if (re->flags & TX_MAP_PAGE)
- pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
- dma_unmap_len(re, maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr),
+ dma_unmap_len(re, maplen), DMA_TO_DEVICE);
re->flags = 0;
}
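
sky2_tx_unmap() reads the mapping back through the dma_unmap_addr()/
dma_unmap_len() accessors. These pair with DEFINE_DMA_UNMAP_ADDR()/
DEFINE_DMA_UNMAP_LEN() fields in the per-slot struct and compile away to
nothing on configurations that keep no unmap state. A sketch of the
bookkeeping side; struct and function names are illustrative:

    #include <linux/dma-mapping.h>

    struct example_tx_info {
            DEFINE_DMA_UNMAP_ADDR(mapaddr); /* dma_addr_t when state is kept */
            DEFINE_DMA_UNMAP_LEN(maplen);   /* length field when state is kept */
    };

    static void example_save_mapping(struct example_tx_info *re,
                                     dma_addr_t mapping, unsigned int len)
    {
            dma_unmap_addr_set(re, mapaddr, mapping);
            dma_unmap_len_set(re, maplen, len);
    }
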
return NETDEV_TX_BUSY;
len = skb_headlen(skb);
- mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(hw->pdev, mapping))
+ mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
+			   DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, mapping))
goto mapping_error;
slot = sky2->tx_prod;

skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
if (likely(skb)) {
- pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
- length, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr,
+ length, DMA_FROM_DEVICE);
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb_copy_hash(skb, re->skb);
__vlan_hwaccel_copy_tag(skb, re->skb);
- pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
- length, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&sky2->hw->pdev->dev,
+ re->data_addr, length,
+ DMA_FROM_DEVICE);
__vlan_hwaccel_clear_tag(re->skb);
skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
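
The copybreak hunk above is an ownership hand-off on a mapping that stays
live: dma_sync_single_for_cpu() gives the buffer to the CPU before
skb_copy_from_linear_data() reads it, and dma_sync_single_for_device() hands
it back so the same RX buffer can be reposted. In sketch form, with
illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static void example_peek_rx(struct device *dev, dma_addr_t addr,
                                void *dst, const void *src, size_t len)
    {
            dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
            memcpy(dst, src, len);  /* CPU owns the buffer between the syncs */
            dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
    }
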
pci_set_master(pdev);
if (sizeof(dma_addr_t) > sizeof(u32) &&
- !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err < 0) {
dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
"for consistent allocations\n");
goto err_out_free_regions;
}
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_out_free_regions;
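
The mask setup converts one-for-one: pci_set_dma_mask() becomes
dma_set_mask() and pci_set_consistent_dma_mask() becomes
dma_set_coherent_mask(), keeping the driver's 64-bit-then-32-bit structure
and its using_dac bookkeeping. A later cleanup could fold each pair into a
single dma_set_mask_and_coherent() call; a sketch of that simpler shape (not
what this patch does, and it omits using_dac):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            /* one call sets both the streaming and the coherent mask */
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;       /* 64-bit DMA available */

            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
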
/* ring for status responses */
hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
- hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
- &hw->st_dma);
+ hw->st_le = dma_alloc_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ &hw->st_dma, GFP_KERNEL);
if (!hw->st_le) {
err = -ENOMEM;
goto err_out_reset;

pci_disable_msi(pdev);
free_netdev(dev);
err_out_free_pci:
- pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
- hw->st_le, hw->st_dma);
+ dma_free_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
err_out_reset:
sky2_write8(hw, B0_CTST, CS_RST_SET);
err_out_iounmap:

if (hw->flags & SKY2_HW_USE_MSI)
pci_disable_msi(pdev);
- pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
- hw->st_le, hw->st_dma);
+ dma_free_coherent(&pdev->dev,
+ hw->st_size * sizeof(struct sky2_status_le),
+ hw->st_le, hw->st_dma);
pci_release_regions(pdev);
pci_disable_device(pdev);