// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "setup.h"
#include "en/params.h"

/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
 * change unexpectedly, and mlx5e has a minimum valid stride size for striding
 * RQ, keep this check in the driver.
 */
#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
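
/* Together with the PAGE_SIZE cap checked below, this bounds valid XSK chunk
 * sizes to [MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE], i.e. 2048..4096 bytes on
 * systems with 4K pages.
 */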
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
			      struct mlx5e_xsk_param *xsk,
			      struct mlx5_core_dev *mdev)
{
	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
	if (xsk->chunk_size > PAGE_SIZE ||
	    xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
		return false;

	/* Current MTU and XSK headroom don't allow packets to fit the frames. */
	if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
		return false;

	/* frag_sz is different for regular and XSK RQs, so ensure that linear
	 * SKB mode is possible.
	 */
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return mlx5e_rx_is_linear_skb(params, xsk);
	}
}
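
/* Build the parameters of the XSK ICOSQ: a copy of the common SQ parameters
 * with only the log work queue size overridden.
 */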
static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
				       u8 log_wq_size,
				       struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
}
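
/* Build the channel parameters of all XSK queues: the RQ, the XDP SQ, the
 * ICOSQ and their CQs. The ICOSQ only carries NOPs, so the minimum SQ size
 * is sufficient for it.
 */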
static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk,
				   struct mlx5e_channel_param *cparam)
{
	const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
}
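
/* Open the XSK queues of a channel. Each queue's CQ is opened before the
 * queue itself, and any failure unwinds the resources opened so far in
 * reverse order.
 */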
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
		   struct mlx5e_channel *c)
{
	struct mlx5e_channel_param *cparam;
	struct dim_cq_moder icocq_moder = {};
	int err;

	if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
		return -EINVAL;

	cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
	if (!cparam)
		return -ENOMEM;

	mlx5e_build_xsk_cparam(priv, params, xsk, cparam);

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
	if (unlikely(err))
		goto err_free_cparam;

	err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
	if (unlikely(err))
		goto err_close_rx_cq;

	err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
	if (unlikely(err))
		goto err_close_rq;

	/* Create a separate SQ, so that when the UMEM is disabled, we could
	 * close this SQ safely and stop receiving CQEs. In other case, e.g., if
	 * the XDPSQ was used instead, we might run into trouble when the UMEM
	 * is disabled and then reenabled, but the SQ continues receiving CQEs
	 * from the old UMEM.
	 */
	err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
	if (unlikely(err))
		goto err_close_tx_cq;

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
	if (unlikely(err))
		goto err_close_sq;

	/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
	 * triggered and NAPI to be called on the correct CPU.
	 */
	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
	if (unlikely(err))
		goto err_close_icocq;

	kvfree(cparam);

	spin_lock_init(&c->xskicosq_lock);

	set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	return 0;

err_close_icocq:
	mlx5e_close_cq(&c->xskicosq.cq);

err_close_sq:
	mlx5e_close_xdpsq(&c->xsksq);

err_close_tx_cq:
	mlx5e_close_cq(&c->xsksq.cq);

err_close_rq:
	mlx5e_close_rq(&c->xskrq);

err_close_rx_cq:
	mlx5e_close_cq(&c->xskrq.cq);

err_free_cparam:
	kvfree(cparam);

	return err;
}
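
/* Tear down the XSK queues after making sure that neither NAPI nor an XSK
 * wakeup can still be using them. Zeroing the structs afterwards ensures
 * that no stale state survives if XSK is enabled on this channel again.
 */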
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
	clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
	napi_synchronize(&c->napi);
	synchronize_rcu(); /* Sync with the XSK wakeup. */

	mlx5e_close_rq(&c->xskrq);
	mlx5e_close_cq(&c->xskrq.cq);
	mlx5e_close_icosq(&c->xskicosq);
	mlx5e_close_cq(&c->xskicosq.cq);
	mlx5e_close_xdpsq(&c->xsksq);
	mlx5e_close_cq(&c->xsksq.cq);

	memset(&c->xskrq, 0, sizeof(c->xskrq));
	memset(&c->xsksq, 0, sizeof(c->xsksq));
	memset(&c->xskicosq, 0, sizeof(c->xskicosq));
}
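
/* Enable the XSK queues and trigger an IRQ via an ICOSQ NOP, so that NAPI
 * runs on the channel's CPU and starts servicing the newly enabled queues.
 */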
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
	mlx5e_activate_icosq(&c->xskicosq);
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

	spin_lock(&c->xskicosq_lock);
	mlx5e_trigger_irq(&c->xskicosq);
	spin_unlock(&c->xskicosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
	mlx5e_deactivate_rq(&c->xskrq);
	/* TX queue is disabled on close. */
	mlx5e_deactivate_icosq(&c->xskicosq);
}
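
/* Point the direct (single-entry, non-RSS) RQT of channel ix at the given
 * RQN by rewriting its only entry.
 */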
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
{
	struct mlx5e_redirect_rqt_param direct_rrp = {
		.is_rss = false,
		{
			.rqn = rqn,
		},
	};

	u32 rqtn = priv->xsk_tir[ix].rqt.rqtn;

	return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}

int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c)
{
	return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn);
}

int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix)
{
	return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn);
}
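
/* Redirect the RQTs of all XSK-enabled channels to their XSK RQs. This is
 * all-or-nothing: if redirecting any RQT fails, the RQTs already redirected
 * are pointed back at the drop RQ.
 */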
int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int err, i;

	if (!priv->xsk.refcnt)
		return 0;

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_channel *c = chs->c[i];

		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
			continue;

		err = mlx5e_xsk_redirect_rqt_to_channel(priv, c);
		if (unlikely(err))
			goto err_stop;
	}

	return 0;

err_stop:
	for (i--; i >= 0; i--) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}

	return err;
}
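
/* Redirect the RQTs of all XSK-enabled channels to the drop RQ, so that
 * traffic is dropped rather than delivered to the XSK RQs.
 */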
void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	int i;

	if (!priv->xsk.refcnt)
		return;

	for (i = 0; i < chs->num; i++) {
		if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state))
			continue;

		mlx5e_xsk_redirect_rqt_to_drop(priv, i);
	}
}