                             struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
        struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
+       struct mlx5e_wqe_frag_info *head_wi = wi;
        u16 rx_headroom = rq->buff.headroom;
        struct mlx5e_dma_info *di = wi->di;
+       struct skb_shared_info *sinfo;
        u32 frag_consumed_bytes;
-       u32 first_frag_size;
+       struct xdp_buff xdp;
        struct sk_buff *skb;
+       u32 truesize;
        void *va;

        va = page_address(di->page) + wi->offset;
        frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
-       first_frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + frag_consumed_bytes);

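+       /* Sync the whole first frame, not only the received bytes: an XDP
+        * program may read or write anywhere in it.
+        */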
        dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
-                                     first_frag_size, DMA_FROM_DEVICE);
+                                     rq->buff.frame0_sz, DMA_FROM_DEVICE);
        net_prefetch(va + rx_headroom);

-       /* XDP is not supported in this configuration, as incoming packets
-        * might spread among multiple pages.
-        */
-       skb = mlx5e_build_linear_skb(rq, va, first_frag_size, rx_headroom,
-                                    frag_consumed_bytes, 0);
-       if (unlikely(!skb))
-               return NULL;
-
-       page_ref_inc(di->page);
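+       /* Build an xdp_buff around the first fragment; its tailroom holds the
+        * skb_shared_info that will carry any further fragments.
+        */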
+       mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
+       sinfo = xdp_get_shared_info_from_buff(&xdp);
+       truesize = 0;

        cqe_bcnt -= frag_consumed_bytes;
        frag_info++;
        wi++;

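+       /* Attach each remaining fragment to the xdp_buff as an xdp frag. */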
        while (cqe_bcnt) {
+               skb_frag_t *frag;
+
+               di = wi->di;
+
                frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);

-               mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset,
-                                  frag_consumed_bytes, frag_info->frag_stride);
+               dma_sync_single_for_cpu(rq->pdev, di->addr + wi->offset,
+                                       frag_consumed_bytes, DMA_FROM_DEVICE);
+
+               if (!xdp_buff_has_frags(&xdp)) {
+                       /* Initialize the shared info lazily, on the first tail
+                        * fragment only, so that single-fragment packets never
+                        * touch this cold cache line.
+                        */
+                       sinfo->nr_frags = 0;
+                       sinfo->xdp_frags_size = 0;
+                       xdp_buff_set_frags_flag(&xdp);
+               }
+
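+               /* Record the fragment's page, offset and length in the shared
+                * info.
+                */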
+               frag = &sinfo->frags[sinfo->nr_frags++];
+               __skb_frag_set_page(frag, di->page);
+               skb_frag_off_set(frag, wi->offset);
+               skb_frag_size_set(frag, frag_consumed_bytes);
+
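+               /* Propagate pfmemalloc pages so that the skb is flagged when
+                * the shared info is transferred below.
+                */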
+               if (page_is_pfmemalloc(di->page))
+                       xdp_buff_set_frag_pfmemalloc(&xdp);
+
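+               /* truesize accounts the whole stride reserved for the
+                * fragment, not only the bytes consumed.
+                */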
+               sinfo->xdp_frags_size += frag_consumed_bytes;
+               truesize += frag_info->frag_stride;
+
                cqe_bcnt -= frag_consumed_bytes;
                frag_info++;
                wi++;
        }

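+       /* Go back to the head fragment and build the skb linear part over it,
+        * reusing the layout recorded in the xdp_buff.
+        */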
+       di = head_wi->di;
+
+       skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
+                                    xdp.data - xdp.data_hard_start,
+                                    xdp.data_end - xdp.data,
+                                    xdp.data - xdp.data_meta);
+       if (unlikely(!skb))
+               return NULL;
+
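+       /* Take a page reference for the skb; the RQ keeps its own reference
+        * for recycling.
+        */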
+       page_ref_inc(di->page);
+
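+       /* For multi-fragment packets, transfer the frag metadata from the
+        * xdp_buff shared info to the skb.
+        */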
+       if (unlikely(xdp_buff_has_frags(&xdp))) {
+               int i;
+
+               /* build_skb zeroed the head of the shared info (including
+                * sinfo->nr_frags) but left the frags[] array intact, so
+                * recompute the fragment count from how far wi advanced.
+                */
+               xdp_update_skb_shared_info(skb, wi - head_wi - 1,
+                                          sinfo->xdp_frags_size, truesize,
+                                          xdp_buff_is_frag_pfmemalloc(&xdp));
+
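+               /* As with the head page, take an skb page reference on every
+                * fragment page.
+                */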
+               for (i = 0; i < sinfo->nr_frags; i++) {
+                       skb_frag_t *frag = &sinfo->frags[i];
+
+                       page_ref_inc(skb_frag_page(frag));
+               }
+       }
+
        return skb;
}