net/mlx5e: Fix page DMA map/unmap attributes
author Aya Levin <ayal@nvidia.com>
Thu, 23 Dec 2021 12:38:28 +0000 (14:38 +0200)
committer Saeed Mahameed <saeedm@nvidia.com>
Fri, 7 Jan 2022 00:55:39 +0000 (16:55 -0800)
The driver initiates the DMA sync itself, hence it may skip the CPU
sync. Add DMA_ATTR_SKIP_CPU_SYNC as an input attribute to both
dma_map_page and dma_unmap_page to avoid a redundant sync with the CPU.
When the device is forced to work with SWIOTLB, the extra sync can
cause data corruption: the driver unmaps the whole page while the
hardware used only part of the bounce buffer, so the sync overwrites
the entire page with a bounce buffer that only partially contains real
data.

Fixes: bc77b240b3c5 ("net/mlx5e: Add fragmented memory support for RX multi packet WQE")
Fixes: db05815b36cb ("net/mlx5e: Add XSK zero-copy support")
Signed-off-by: Aya Levin <ayal@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
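
For context, a minimal sketch of the mapping pattern the patch switches to.
The helper names (rx_page_map/rx_page_unmap) are illustrative assumptions and
not mlx5e functions: the page is mapped and unmapped with
DMA_ATTR_SKIP_CPU_SYNC, so the DMA core performs no implicit CPU sync and the
driver stays responsible for syncing only the bytes the hardware wrote.

#include <linux/dma-mapping.h>

/* Illustrative helpers, not mlx5e code: map one RX page without the
 * implicit CPU sync; the driver syncs the used bytes explicitly later.
 */
static dma_addr_t rx_page_map(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;
	return addr;
}

static void rx_page_unmap(struct device *dev, dma_addr_t addr)
{
	/* Also skip the CPU sync on unmap: under SWIOTLB an unattributed
	 * unmap copies the whole bounce buffer back over the page,
	 * clobbering the parts the hardware never touched.
	 */
	dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}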
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c

index 7b562d2..279cd8f 100644 (file)
@@ -11,13 +11,13 @@ static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
 {
        struct device *dev = mlx5_core_dma_dev(priv->mdev);
 
-       return xsk_pool_dma_map(pool, dev, 0);
+       return xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
                                 struct xsk_buff_pool *pool)
 {
-       return xsk_pool_dma_unmap(pool, 0);
+       return xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
index 793511d..dfc6604 100644 (file)
@@ -278,8 +278,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
        if (unlikely(!dma_info->page))
                return -ENOMEM;
 
-       dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0,
-                                     PAGE_SIZE, rq->buff.map_dir);
+       dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE,
+                                           rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                page_pool_recycle_direct(rq->page_pool, dma_info->page);
                dma_info->page = NULL;
@@ -300,7 +300,8 @@ static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
 
 void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info)
 {
-       dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir);
+       dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir,
+                            DMA_ATTR_SKIP_CPU_SYNC);
 }
 
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
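
Skipping the CPU sync at map/unmap time is only safe because the RX path
syncs the exact range the device wrote before the data is consumed. Below is
a minimal sketch of that complementary step; the function name and the
fragment offset/length parameters are illustrative assumptions, not the
driver's actual variables.

/* Illustrative: before building the skb, sync only the fragment the
 * device actually filled, not the whole page.
 */
static void rx_frag_sync_for_cpu(struct device *dev, dma_addr_t page_addr,
				 u32 frag_offset, u32 frag_len)
{
	dma_sync_single_range_for_cpu(dev, page_addr, frag_offset, frag_len,
				      DMA_FROM_DEVICE);
}

Because this length-bounded sync already covers everything the hardware
wrote, the full-page sync that dma_unmap_page() would otherwise perform is
redundant, and under SWIOTLB it is actively harmful as described above.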