return true;
}
-static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
- unsigned int *off, unsigned int *len)
-{
- struct xdp_buff xdp;
- void *orig_data;
- int ret;
-
- xdp.data_hard_start = hard_start;
- xdp.data = data + *off;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = data + *off + *len;
-
- orig_data = xdp.data;
- ret = bpf_prog_run_xdp(prog, &xdp);
-
- *len -= xdp.data - orig_data;
- *off += xdp.data - orig_data;
-
- return ret;
-}
-
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
struct nfp_meta_parsed meta;
struct net_device *netdev;
dma_addr_t new_dma_addr;
+ u32 meta_len_xdp = 0;
void *new_frag;
idx = D_IDX(rx_ring, rx_ring->rd_p);
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp) && !meta.portid) {
+ void *orig_data = rxbuf->frag + pkt_off;
unsigned int dma_off;
- void *hard_start;
+ struct xdp_buff xdp;
int act;
- hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
- &pkt_off, &pkt_len);
+ xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
+ xdp.data = orig_data;
+ xdp.data_meta = orig_data;
+ xdp.data_end = orig_data + pkt_len;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ pkt_len -= xdp.data - orig_data;
+ pkt_off += xdp.data - orig_data;
+
switch (act) {
case XDP_PASS:
+ meta_len_xdp = xdp.data - xdp.data_meta;
break;
case XDP_TX:
dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(rxd->rxd.vlan));
+ if (meta_len_xdp)
+ skb_metadata_set(skb, meta_len_xdp);
napi_gro_receive(&rx_ring->r_vec->napi, skb);
}
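
For reference, the data_meta plumbing added above only has an effect when the attached XDP program actually reserves a metadata area with bpf_xdp_adjust_meta(); otherwise xdp.data_meta stays equal to xdp.data and meta_len_xdp remains 0. A minimal sketch of such a program follows; the program name, section name, and the 0xcafe value are illustrative and not part of this patch, while bpf_xdp_adjust_meta() is the real helper that the data_meta mechanism is built around:

/* Illustrative XDP program: reserves 4 bytes of metadata in front of
 * the packet payload. On XDP_PASS, the driver change above computes
 * meta_len_xdp = xdp.data - xdp.data_meta and carries it into the skb
 * via skb_metadata_set().
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_meta_example(struct xdp_md *ctx)
{
	void *data, *data_meta;
	__u32 *meta;

	/* A negative delta grows the metadata area, moving data_meta
	 * down toward data_hard_start.
	 */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	/* Pointers must be reloaded and bounds-checked after the
	 * adjust, or the verifier rejects the program.
	 */
	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	meta = data_meta;
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	*meta = 0xcafe;	/* e.g. a classification mark for the stack */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

With such a program loaded, the driver passes the 4 metadata bytes through to the skb, and a classifier attached later in the stack (e.g. a tc BPF program) can read the same bytes through its own data_meta pointer.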