// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc */
/* Copyright (C) 2021 Corigine, Inc */

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/xdp_sock_drv.h>
#include <trace/events/xdp.h>

#include "nfp_app.h"
#include "nfp_net.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

static void
nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
			  struct xdp_buff *xdp)
{
	unsigned int headroom;

	headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);

	/* Clear the meta length and done bit so the descriptor cannot be
	 * mistaken for a completed one before HW writes it back.
	 */
	rx_ring->rxds[idx].fld.reserved = 0;
	rx_ring->rxds[idx].fld.meta_len_dd = 0;

	/* Stash the buffer; its DMA address points past the pool headroom. */
	rx_ring->xsk_rxbufs[idx].xdp = xdp;
	rx_ring->xsk_rxbufs[idx].dma_addr =
		xsk_buff_xdp_get_frame_dma(xdp) + headroom;
}
void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
{
	rxbuf->dma_addr = 0;
	rxbuf->xdp = NULL;
}

void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
{
	if (rxbuf->xdp)
		xsk_buff_free(rxbuf->xdp);

	nfp_net_xsk_rx_unstash(rxbuf);
}
void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (!rx_ring->cnt)
		return;

	/* One ring slot is always kept empty, so at most cnt - 1 buffers
	 * are ever stashed.
	 */
	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
}
void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	unsigned int wr_idx, wr_ptr_add = 0;
	struct xdp_buff *xdp;

	while (nfp_net_rx_space(rx_ring)) {
		wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

		/* Stop refilling once the pool runs out of buffers. */
		xdp = xsk_buff_alloc(pool);
		if (!xdp)
			break;

		nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);

		nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
				      rx_ring->xsk_rxbufs[wr_idx].dma_addr);

		rx_ring->wr_p++;
		wr_ptr_add++;
	}

	/* Ensure all records are visible before incrementing write counter. */
	wmb();

	nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
}
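
/* For context: a refill like the above is typically driven from the queue
 * vector's napi poll handler once RX descriptors have been consumed. A
 * minimal illustrative sketch follows; "example_xsk_poll",
 * "example_process_rx" and "example_rearm_irq" are hypothetical names, not
 * this driver's actual poll path:
 *
 *	static int example_xsk_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct nfp_net_r_vector *r_vec =
 *			container_of(napi, struct nfp_net_r_vector, napi);
 *		int pkts_polled;
 *
 *		pkts_polled = example_process_rx(r_vec, budget);
 *		nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
 *
 *		if (pkts_polled < budget &&
 *		    napi_complete_done(napi, pkts_polled))
 *			example_rearm_irq(r_vec);
 *		return pkts_polled;
 *	}
 */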
void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
			 struct nfp_net_xsk_rx_buf *xrxbuf)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_xsk_rx_free(xrxbuf);
}
static void nfp_net_xsk_pool_unmap(struct device *dev,
				   struct xsk_buff_pool *pool)
{
	xsk_pool_dma_unmap(pool, 0);
}

static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_map(pool, dev, 0);
}
int nfp_net_xsk_setup_pool(struct net_device *netdev,
			   struct xsk_buff_pool *pool, u16 queue_id)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct xsk_buff_pool *prev_pool;
	struct nfp_net_dp *dp;
	int err;

	/* NFDK doesn't implement xsk yet. */
	if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
		return -EOPNOTSUPP;

	/* Reject on old FWs so we can drop some checks on datapath. */
	if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		return -EOPNOTSUPP;
	if (!nn->dp.chained_metadata_format)
		return -EOPNOTSUPP;

	/* Map the new pool for DMA before swapping it in. */
	if (pool) {
		err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
		if (err)
			return err;
	}

	/* Swap the pool in via a ring reconfig. */
	dp = nfp_net_clone_dp(nn);
	if (!dp) {
		err = -ENOMEM;
		goto err_unmap;
	}

	prev_pool = dp->xsk_pools[queue_id];
	dp->xsk_pools[queue_id] = pool;

	err = nfp_net_ring_reconfig(nn, dp, NULL);
	if (err)
		goto err_unmap;

	/* Unmap the previous pool only once the swap has succeeded. */
	if (prev_pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);

	return 0;

err_unmap:
	if (pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, pool);
	return err;
}
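
/* For context: nfp_net_xsk_setup_pool() is reached from the stack's
 * XDP_SETUP_XSK_POOL request, dispatched by the driver's ndo_bpf()
 * callback. A minimal illustrative dispatch; "example_ndo_bpf" is a
 * hypothetical name, while struct netdev_bpf and XDP_SETUP_XSK_POOL are
 * the real kernel API:
 *
 *	static int example_ndo_bpf(struct net_device *netdev,
 *				   struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_XSK_POOL:
 *			return nfp_net_xsk_setup_pool(netdev, bpf->xsk.pool,
 *						      bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */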
int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* queue_id comes from a zero-copy socket, installed with
	 * XDP_SETUP_XSK_POOL, so it must be within our vector range.
	 * Moreover, our napi structs are statically allocated, so we can
	 * always kick them without worrying whether a reconfig is in
	 * progress or the interface is down.
	 */
	napi_schedule(&nn->r_vecs[queue_id].napi);

	return 0;
}
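
/* For context: nfp_net_xsk_wakeup() is exported to the stack through
 * net_device_ops. A sketch of the hookup; .ndo_xsk_wakeup is the real
 * net_device_ops member, the ops struct name here is hypothetical:
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_xsk_wakeup	= nfp_net_xsk_wakeup,
 *	};
 */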