skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
if (unlikely(!skb))
goto skb_alloc_failed;
- mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(tp->pci_dev, mapping))
+ mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&tp->pci_dev->dev, mapping))
goto out;
sis190_map_to_asic(desc, mapping, rx_buf_sz);
if (!skb)
goto out;
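Every hunk in this conversion follows the same shape: the legacy pci_*
wrappers took a struct pci_dev and PCI_DMA_* direction flags, while the
generic API takes the embedded struct device (&pdev->dev) and DMA_*
directions. A minimal sketch of the streaming Rx mapping pattern, using a
hypothetical example_map_rx() helper rather than actual driver code:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an Rx skb for device writes; the result must be checked with
 * dma_mapping_error(), never compared against 0. */
static int example_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
			  size_t len, dma_addr_t *mapping)
{
	*mapping = dma_map_single(&pdev->dev, skb->data, len,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;	/* e.g. swiotlb exhaustion */
	return 0;
}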
- pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
*sk_buff = skb;
done = true;
if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
- pci_dma_sync_single_for_device(pdev, addr,
- tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev, addr,
+ tp->rx_buf_sz,
+ DMA_FROM_DEVICE);
sis190_give_to_asic(desc, tp->rx_buf_sz);
} else {
- pci_unmap_single(pdev, addr, tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, addr,
+ tp->rx_buf_sz,
+ DMA_FROM_DEVICE);
tp->Rx_skbuff[entry] = NULL;
sis190_make_unusable_by_asic(desc);
}
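The copy-versus-recycle branch above leans on the ownership rules for
streaming mappings: the CPU may only read the buffer between
dma_sync_single_for_cpu() and the matching dma_sync_single_for_device().
A condensed sketch of the copy fast path, with invented names, assuming
the buffer was mapped DMA_FROM_DEVICE:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static void example_rx_copy(struct pci_dev *pdev, dma_addr_t addr,
			    const void *buf, struct sk_buff *copy,
			    int pkt_size, size_t buf_sz)
{
	/* Hand the buffer to the CPU before touching its contents. */
	dma_sync_single_for_cpu(&pdev->dev, addr, buf_sz, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(copy, buf, pkt_size);
	/* Return ownership so the descriptor can be recycled. */
	dma_sync_single_for_device(&pdev->dev, addr, buf_sz,
				   DMA_FROM_DEVICE);
}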
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
- pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len,
+ DMA_TO_DEVICE);
memset(desc, 0x00, sizeof(*desc));
}
{
struct pci_dev *pdev = tp->pci_dev;
- pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+ DMA_FROM_DEVICE);
dev_kfree_skb(*sk_buff);
*sk_buff = NULL;
sis190_make_unusable_by_asic(desc);
* Rx and Tx descriptors need 256 bytes alignment.
- * pci_alloc_consistent() guarantees a stronger alignment.
+ * dma_alloc_coherent() guarantees a stronger alignment.
*/
- tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+ tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
+ &tp->tx_dma, GFP_KERNEL);
if (!tp->TxDescRing)
goto out;
- tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+ tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES,
+ &tp->rx_dma, GFP_KERNEL);
if (!tp->RxDescRing)
goto err_free_tx_0;
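One behavioral note: pci_alloc_consistent() was a compat wrapper that
called dma_alloc_coherent() with a hard-coded GFP_ATOMIC, so spelling out
GFP_KERNEL here is a safe relaxation — these allocations run in process
context. A sketch of the paired allocate/free with hypothetical helpers:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t bytes,
				dma_addr_t *dma)
{
	/* Coherent memory; this also satisfies the 256-byte descriptor
	 * alignment, since dma_alloc_coherent() returns memory aligned
	 * to at least a page. */
	return dma_alloc_coherent(&pdev->dev, bytes, dma, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev, size_t bytes,
			      void *ring, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, bytes, ring, dma);
}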
sis190_delete_timer(dev);
sis190_rx_clear(tp);
err_free_rx_1:
- pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+ dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
+ tp->rx_dma);
err_free_tx_0:
- pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+ dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
+ tp->tx_dma);
goto out;
}
free_irq(pdev->irq, dev);
- pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
- pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+ dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
+ tp->tx_dma);
+ dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
+ tp->rx_dma);
tp->TxDescRing = NULL;
tp->RxDescRing = NULL;
return NETDEV_TX_BUSY;
}
- mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
+ mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&tp->pci_dev->dev, mapping)) {
netif_err(tp, tx_err, dev,
"PCI mapping failed, dropping packet");
return NETDEV_TX_BUSY;
goto err_pci_disable_2;
}
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc < 0) {
if (netif_msg_probe(tp))
pr_err("%s: DMA configuration failed\n",
ret = pci_enable_device(pci_dev);
if(ret) return ret;
- i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if(i){
printk(KERN_ERR "sis900.c: architecture does not support "
"32bit PCI busmaster DMA\n");
pci_set_drvdata(pci_dev, net_dev);
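pci_set_dma_mask() maps directly onto dma_set_mask(), which sets only the
streaming mask; the coherent mask of a PCI device defaults to 32 bits, so
nothing changes for these drivers. New code would usually call
dma_set_mask_and_coherent() to set both at once. A hedged sketch (the
helper name is invented):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* Negotiate 32-bit addressing for both streaming and
	 * coherent allocations in one call. */
	int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc)
		dev_err(&pdev->dev,
			"no usable 32-bit DMA configuration\n");
	return rc;
}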
- ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pci_dev->dev, TX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
if (!ring_space) {
ret = -ENOMEM;
goto err_out_unmap;
sis_priv->tx_ring = ring_space;
sis_priv->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pci_dev->dev, RX_TOTAL_SIZE,
+ &ring_dma, GFP_KERNEL);
if (!ring_space) {
ret = -ENOMEM;
goto err_unmap_tx;
return 0;
err_unmap_rx:
- pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
- sis_priv->rx_ring_dma);
+ dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
err_unmap_tx:
- pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
- sis_priv->tx_ring_dma);
+ dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
err_out_unmap:
pci_iounmap(pci_dev, ioaddr);
err_out_cleardev:
}
sis_priv->rx_skbuff[i] = skb;
sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
- sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
- skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
- sis_priv->rx_ring[i].bufptr))) {
+ sis_priv->rx_ring[i].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
+ skb->data,
+ RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+ sis_priv->rx_ring[i].bufptr))) {
dev_kfree_skb(skb);
sis_priv->rx_skbuff[i] = NULL;
break;
struct sk_buff *skb = sis_priv->tx_skbuff[i];
if (skb) {
- pci_unmap_single(sis_priv->pci_dev,
- sis_priv->tx_ring[i].bufptr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&sis_priv->pci_dev->dev,
+ sis_priv->tx_ring[i].bufptr,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
sis_priv->tx_skbuff[i] = NULL;
sis_priv->tx_ring[i].cmdsts = 0;
sis_priv->tx_skbuff[entry] = skb;
/* set the transmit buffer descriptor and enable Transmit State Machine */
- sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
- sis_priv->tx_ring[entry].bufptr))) {
+ sis_priv->tx_ring[entry].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+ sis_priv->tx_ring[entry].bufptr))) {
dev_kfree_skb_any(skb);
sis_priv->tx_skbuff[entry] = NULL;
net_dev->stats.tx_dropped++;
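Dropping a packet on a Tx mapping failure conventionally means consuming
the skb and returning NETDEV_TX_OK, since NETDEV_TX_BUSY asks the stack
to requeue it. A minimal sketch of that error path, with invented names:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct pci_dev *pdev,
				struct net_device *dev,
				struct sk_buff *skb)
{
	dma_addr_t mapping = dma_map_single(&pdev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);	/* safe in any context */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;	/* consumed, not requeued */
	}
	/* ... post the descriptor using 'mapping' ... */
	return NETDEV_TX_OK;
}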
struct sk_buff * skb;
struct sk_buff * rx_skb;
- pci_unmap_single(sis_priv->pci_dev,
- sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&sis_priv->pci_dev->dev,
+ sis_priv->rx_ring[entry].bufptr,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
/* refill the Rx buffer, what if there is not enough
* memory for new socket buffer ?? */
sis_priv->rx_skbuff[entry] = skb;
sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
sis_priv->rx_ring[entry].bufptr =
- pci_map_single(sis_priv->pci_dev, skb->data,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
- sis_priv->rx_ring[entry].bufptr))) {
+ dma_map_single(&sis_priv->pci_dev->dev,
+ skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+ sis_priv->rx_ring[entry].bufptr))) {
dev_kfree_skb_irq(skb);
sis_priv->rx_skbuff[entry] = NULL;
break;
sis_priv->rx_skbuff[entry] = skb;
sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
sis_priv->rx_ring[entry].bufptr =
- pci_map_single(sis_priv->pci_dev, skb->data,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
- sis_priv->rx_ring[entry].bufptr))) {
+ dma_map_single(&sis_priv->pci_dev->dev,
+ skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+ sis_priv->rx_ring[entry].bufptr))) {
dev_kfree_skb_irq(skb);
sis_priv->rx_skbuff[entry] = NULL;
break;
}
/* Free the original skb. */
skb = sis_priv->tx_skbuff[entry];
- pci_unmap_single(sis_priv->pci_dev,
- sis_priv->tx_ring[entry].bufptr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&sis_priv->pci_dev->dev,
+ sis_priv->tx_ring[entry].bufptr, skb->len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
sis_priv->tx_skbuff[entry] = NULL;
sis_priv->tx_ring[entry].bufptr = 0;
for (i = 0; i < NUM_RX_DESC; i++) {
skb = sis_priv->rx_skbuff[i];
if (skb) {
- pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev,
+ sis_priv->rx_ring[i].bufptr,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
sis_priv->rx_skbuff[i] = NULL;
}
for (i = 0; i < NUM_TX_DESC; i++) {
skb = sis_priv->tx_skbuff[i];
if (skb) {
- pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev,
+ sis_priv->tx_ring[i].bufptr,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
sis_priv->tx_skbuff[i] = NULL;
}
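Teardown unwinds in order: every outstanding streaming mapping is undone
and its skb freed before the coherent descriptor rings are released, as
the loops above and the frees below do. A condensed sketch with an
invented field layout:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

static void example_rx_teardown(struct pci_dev *pdev,
				struct sk_buff **skbs, dma_addr_t *bufptrs,
				int n, size_t buf_sz,
				void *ring, dma_addr_t ring_dma,
				size_t ring_sz)
{
	int i;

	/* Unmap and free each in-flight Rx buffer first ... */
	for (i = 0; i < n; i++) {
		if (!skbs[i])
			continue;
		dma_unmap_single(&pdev->dev, bufptrs[i], buf_sz,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skbs[i]);
		skbs[i] = NULL;
	}
	/* ... then release the descriptor ring itself. */
	dma_free_coherent(&pdev->dev, ring_sz, ring, ring_dma);
}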
kfree(phy);
}
- pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
- sis_priv->rx_ring_dma);
- pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
- sis_priv->tx_ring_dma);
+ dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+ sis_priv->rx_ring_dma);
+ dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+ sis_priv->tx_ring_dma);
pci_iounmap(pci_dev, sis_priv->ioaddr);
free_netdev(net_dev);
pci_release_regions(pci_dev);