net: hns3: fix for fraglist SKB headlen not handling correctly
author Huazhong Tan <tanhuazhong@huawei.com>
Sat, 28 Mar 2020 07:09:56 +0000 (15:09 +0800)
committer David S. Miller <davem@davemloft.net>
Mon, 30 Mar 2020 17:57:53 +0000 (10:57 -0700)
When the fraglist SKB headlen is larger than zero, the current code
still handles the fraglist SKB's linear data as frag data, which may
cause a TX error.

This patch adds a new DESC_TYPE_FRAGLIST_SKB type to handle the
mapping and unmapping of the fraglist SKB's linear data buffer.

Fixes: 8ae10cfb5089 ("net: hns3: support tx-scatter-gather-fraglist feature")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
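
For context, here is a minimal sketch of the mapping scheme the fix implements; it is not the driver's actual code, and the helper name sketch_map_tx_skb is hypothetical. Each skb hanging off the frag_list is walked with skb_walk_frags(), its linear region (skb_headlen()) is mapped with dma_map_single() just like the head skb, and only the page frags go through skb_frag_dma_map().

/*
 * Sketch only (assumed helper name): map the fraglist skbs of a TX skb.
 * Linear data of every fraglist skb -> dma_map_single(), like the head
 * skb (DESC_TYPE_FRAGLIST_SKB); page frags -> skb_frag_dma_map(), as
 * before (DESC_TYPE_PAGE).
 */
static int sketch_map_tx_skb(struct device *dev, struct sk_buff *head)
{
	struct sk_buff *frag_skb;

	skb_walk_frags(head, frag_skb) {
		unsigned int head_len = skb_headlen(frag_skb);
		dma_addr_t dma;
		int i;

		if (head_len) {
			/* linear data of the fraglist skb */
			dma = dma_map_single(dev, frag_skb->data, head_len,
					     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, dma))
				return -ENOMEM;
		}

		for (i = 0; i < skb_shinfo(frag_skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(frag_skb)->frags[i];

			/* page frags, handled as before */
			dma = skb_frag_dma_map(dev, frag, 0,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			if (dma_mapping_error(dev, dma))
				return -ENOMEM;
		}
	}

	return 0;
}

With the new type stored in desc_cb, the cleanup paths in hns3_clear_desc() and hns3_unmap_buffer() (diff below) only need to check the saved type to pick dma_unmap_single() for both DESC_TYPE_SKB and DESC_TYPE_FRAGLIST_SKB buffers.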

drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a3e4081..5587605 100644
@@ -78,6 +78,7 @@
 
 enum hns_desc_type {
        DESC_TYPE_SKB,
+       DESC_TYPE_FRAGLIST_SKB,
        DESC_TYPE_PAGE,
 };
 
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a7f40aa..6936384 100644
@@ -1106,6 +1106,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                if (unlikely(ret < 0))
                        return ret;
 
+               dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+       } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+               struct sk_buff *skb = (struct sk_buff *)priv;
+
                dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        } else {
                frag = (skb_frag_t *)priv;
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
                /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
                desc_cb->priv = priv;
                desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
-               desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
-                               DESC_TYPE_SKB : DESC_TYPE_PAGE;
+               desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+                                 type == DESC_TYPE_SKB) && !k) ?
+                               type : DESC_TYPE_PAGE;
 
                /* now, fill the descriptor */
                desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
                ring_ptr_move_bw(ring, next_to_use);
 
                /* unmap the descriptor dma address */
-               if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+               if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+                   ring->desc_cb[ring->next_to_use].type ==
+                   DESC_TYPE_FRAGLIST_SKB)
                        dma_unmap_single(dev,
                                         ring->desc_cb[ring->next_to_use].dma,
                                        ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
                goto out;
 
        skb_walk_frags(skb, frag_skb) {
-               ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+               ret = hns3_fill_skb_to_desc(ring, frag_skb,
+                                           DESC_TYPE_FRAGLIST_SKB);
                if (unlikely(ret < 0))
                        goto fill_err;
 
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
                              struct hns3_desc_cb *cb)
 {
-       if (cb->type == DESC_TYPE_SKB)
+       if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else if (cb->length)