prog = rcu_dereference(rq->xdp_prog);
if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
- if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
struct mlx5e_wqe_frag_info *pwi;

for (pwi = head_wi; pwi < wi; pwi++)
- pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+ pwi->frag_page->frags++;
}
return NULL; /* page/packet was consumed by XDP */
}
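
This first hunk appears to be the legacy RQ's non-linear path: when the XDP program returns XDP_TX, every fragment already attached to the packet must outlive the RX-side release. The old code set MLX5E_WQE_FRAG_SKIP_RELEASE on each WQE fragment so the release path would skip it; the new code instead bumps the backing page's fragment count (frag_page->frags++), which the XDP_TX completion drops later, so the normal release path can run unconditionally. Below is a minimal userspace sketch of that accounting; the demo_* names are hypothetical stand-ins for the driver's mlx5e_frag_page and mlx5e_page_release_fragmented(), not driver code.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for struct mlx5e_frag_page: a page plus a
 * count of outstanding fragment references. */
struct demo_frag_page {
	int frags;
};

/* RX path under XDP_TX: take an extra reference so the page survives
 * the RX-side release. Non-atomic on purpose: RX and the XDP_TX
 * completion run in the same NAPI context. */
static void demo_xdp_xmit_hold(struct demo_frag_page *fp)
{
	fp->frags++;
}

/* Release path: drop one reference; recycle the page on the last one. */
static void demo_release_fragmented(struct demo_frag_page *fp)
{
	if (--fp->frags == 0)
		printf("page recycled\n");
}

int main(void)
{
	struct demo_frag_page fp = { .frags = 1 };	/* RX holds one ref */

	demo_xdp_xmit_hold(&fp);	/* XDP_TX verdict: second ref */
	demo_release_fragmented(&fp);	/* RX release: page survives */
	demo_release_fragmented(&fp);	/* TX completion: recycled */
	assert(fp.frags == 0);
	return 0;
}

The next hunk begins on the wrapped argument tail of the skb_from_cqe indirect call in the RX completion handler:
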
rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- /* do not return page to cache,
- * it will be returned on XDP_TX completion.
- */
- wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
- }
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ wi->frag_page->frags++;
goto wq_cyc_pop;
}
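
The identical fixup appears a second time below. The skb-build failure handling is duplicated between the regular CQE handler and its switchdev-representor counterpart (presumably mlx5e_handle_rx_cqe and mlx5e_handle_rx_cqe_rep; the hunk headers are not shown), and both copies move from the SKIP_RELEASE flag to the fragment count:
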
rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
- if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- /* do not return page to cache,
- * it will be returned on XDP_TX completion.
- */
- wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
- }
+ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+ wi->frag_page->frags++;
goto wq_cyc_pop;
}
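
One detail worth calling out: the check is __test_and_clear_bit(), not plain test_bit() (the first hunk makes exactly that switch). MLX5E_RQ_FLAG_XDP_XMIT is set per packet by the XDP verdict, so it must be consumed when acted on; otherwise a later CQE whose skb build fails for an unrelated reason would inherit the stale flag and take a fragment reference that no TX completion ever drops. The non-atomic double-underscore variant suffices because the flag is only touched from the RQ's own NAPI poll. A plain-C model of why the flag has to be cleared on use (illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_FLAG_XDP_XMIT 0x1UL

/* Plain-C model of __test_and_clear_bit() on a single flag word. */
static bool demo_test_and_clear(unsigned long *flags, unsigned long bit)
{
	bool was_set = *flags & bit;

	*flags &= ~bit;
	return was_set;
}

int main(void)
{
	unsigned long rq_flags = DEMO_FLAG_XDP_XMIT; /* set by XDP_TX */

	/* The first failed-skb CQE consumes the flag and keeps its page... */
	printf("cqe 1 holds ref: %d\n",
	       demo_test_and_clear(&rq_flags, DEMO_FLAG_XDP_XMIT));
	/* ...so a later, unrelated failure cannot leak another reference. */
	printf("cqe 2 holds ref: %d\n",
	       demo_test_and_clear(&rq_flags, DEMO_FLAG_XDP_XMIT));
	return 0;
}
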
if (prog) {
if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
- int i;
+ struct mlx5e_frag_page *pfp;
+
+ for (pfp = head_page; pfp < frag_page; pfp++)
+ pfp->frags++;

- for (i = 0; i < sinfo->nr_frags; i++)
- /* non-atomic */
- __set_bit(page_idx + i, wi->skip_release_bitmap);
- return NULL;
+ wi->linear_page.frags++;
}
mlx5e_page_release_fragmented(rq, &wi->linear_page);
return NULL; /* page/packet was consumed by XDP */
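
This striding-RQ non-linear hunk applies the same idea twice over. The old code set one skip-release bit per shared-info fragment and returned early, bypassing the release of the linear staging page; the new code walks head_page up to frag_page bumping each page's fragment count, and additionally takes a reference on wi->linear_page so that the now-unconditional mlx5e_page_release_fragmented() call leaves that page alive until the XDP_TX completion releases it. In terms of the demo_* sketch above, the bump-then-release pair leaves the count unchanged:

	demo_xdp_xmit_hold(&fp);	/* wi->linear_page.frags++ */
	demo_release_fragmented(&fp);	/* unconditional release: no recycle */

The last hunk begins on the wrapped argument tail of the mxbuf fill call in the linear striding-RQ path:
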
cqe_bcnt, &mxbuf);
if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
- __set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+ frag_page->frags++;
return NULL; /* page/packet was consumed by XDP */
}
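
The linear striding-RQ case is the simplest of the four: a single frag_page backs the packet, so one frags++ replaces the single skip-release bit. With that, every XDP_TX hold in these paths is expressed the same way, as a fragment-count increment that the TX completion later drops, and the RX release paths no longer need per-fragment skip bits.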