// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom = NET_IP_ALIGN;

	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

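/* Illustrative values (architecture-dependent, not authoritative): with an XDP
 * program attached the headroom is NET_IP_ALIGN + XDP_PACKET_HEADROOM
 * (0 or 2 plus 256 bytes); without XDP it is NET_IP_ALIGN + MLX5_RX_HEADROOM.
 */
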
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows treating XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by using
	 * the fact there are no multiple packets per "page" (which is a frame).
	 * The latter is important, because frames may come in a random order,
	 * and we will have trouble assembling a real page of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

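/* Illustrative example (assuming a 4 KB PAGE_SIZE and a 1500-byte sw_mtu;
 * exact values depend on the architecture and configuration): without XDP the
 * fragment is roughly headroom + the wire frame size + the skb_shared_info
 * overhead added by MLX5_SKB_FRAG_SZ(), i.e. well under one page. With an XDP
 * program or an XSK, the fragment is rounded up to at least PAGE_SIZE (or the
 * XSK chunk size if larger), so a single packet always owns the whole "page".
 */
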
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

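/* Example of the arithmetic above (illustrative values): with
 * MLX5_MPWRQ_LOG_WQE_SZ = 18 (a 256 KB multi-packet WQE) and a 4 KB linear
 * fragment (order_base_2() = 12), each MPWQE holds 2^(18 - 12) = 64 packets,
 * so the function returns 6.
 */
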
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
		linear_frag_sz <= PAGE_SIZE;
}

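/* Rough intuition (illustrative numbers): with a 1500-byte MTU the linear
 * fragment fits into a 4 KB page, so the RX path can build the SKB around the
 * packet data in place (a "linear" SKB). With a 9000-byte MTU the fragment
 * exceeds PAGE_SIZE, so a fragmented, non-linear SKB must be used instead.
 */
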
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	u8 log_stride_sz, log_num_strides;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

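/* Worked example (numbers chosen for illustration): if log_rq_mtu_frames is 13
 * (the RQ should cover 8192 MTU-sized frames) and each MPWQE carries
 * 2^6 = 64 packets, the MPWQE ring needs 2^(13 - 6) = 128 WQEs, so this
 * returns 7, clamped from below by MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW.
 */
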
u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params)
{
	return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
}

u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params)
{
	return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
}

u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params)
{
	u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
			 PAGE_SIZE;

	return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
}

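/* Worked example (illustrative, assuming a 64 KB reservation and a 1500-byte
 * sw_mtu): DIV_ROUND_UP(65536, 1500) = 44 and order_base_2(44) = 6, so up to
 * 2^6 = 64 packets are accounted per reservation.
 */
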
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
	return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	u16 linear_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC)
		return linear_headroom;

	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return linear_headroom;

	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		return linear_headroom;

	return 0;
}

u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_tls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum-sized WQE, and in addition
		 * all the normal stop room can be taken if a new packet breaks
		 * the active MPWQE session and allocates its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_max_wqe(mdev);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

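/* Illustrative example: a 100 Gb/s port reports link_speed = 100000 (Mb/s).
 * If the PCIe link only provides about 32 Gb/s (pci_bw ~= 32000), then
 * 100000 > 2 * 32000 and the heuristic declares the setup PCI-limited.
 */
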
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size)
{
	/* Optimization for small packets: the last fragment is bigger than the others. */
	return first_frag_size + (MLX5E_MAX_RX_FRAGS - 2) * frag_size + PAGE_SIZE;
}

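/* Rough example of this bound (illustrative values): with MLX5E_MAX_RX_FRAGS = 4,
 * a 2048-byte default fragment and a 4 KB page, the largest byte count a
 * non-linear legacy RQ WQE can hold is about first_frag_size + 2 * 2048 + 4096.
 * The first fragment is smaller than the others because it also carries the
 * headroom and the skb_shared_info tailroom.
 */
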
#define DEFAULT_FRAG_SIZE (2048)

static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_xsk_param *xsk,
				     struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	int first_frag_size_max;
	u32 buf_size = 0;
	u16 headroom;
	int max_mtu;
	int i = 0;

	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	headroom = mlx5e_get_linear_rq_headroom(params, xsk);
	first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

	max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
	if (byte_count > max_mtu) {
		frag_size_max = PAGE_SIZE;
		first_frag_size_max = SKB_WITH_OVERHEAD(frag_size_max - headroom);

		max_mtu = mlx5e_max_nonlinear_mtu(first_frag_size_max, frag_size_max);
		if (byte_count > max_mtu) {
			mlx5_core_err(mdev, "MTU %u is too big for non-linear legacy RQ (max %d)\n",
				      params->sw_mtu, max_mtu);
			return -EINVAL;
		}
	}

	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i == 0)
			frag_size = min(frag_size, first_frag_size_max);
		else if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		buf_size += frag_size;

		if (i == 0) {
			/* Ensure that headroom and tailroom are included. */
			frag_size += headroom;
			frag_size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		}

		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);

	return 0;
}

static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
					struct mlx5e_params *params,
					struct mlx5e_xsk_param *xsk)
{
	int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
	int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
	int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
	int wqe_size = BIT(log_stride_sz) * num_strides;

	/* +1 is for the case that the pkt_per_rsrv packets don't consume the
	 * reservation, so we get a filler cqe for the rest of the reservation.
	 */
	return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
}

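/* Worked example (illustrative, reusing the numbers from the examples above):
 * a 256 KB WQE split into 64 KB reservations gives 4 reservations per WQE.
 * With a 128-WQE ring and up to 64 packets plus one filler CQE per
 * reservation, the CQ must hold 4 * 128 * 65 = 33280 entries, so the function
 * returns order_base_2(33280) = 16.
 */
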
static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
			log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
		else
			log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
				mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		  MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;
	int err;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
			MLX5_SET(wq, wq, shampo_enable, true);
			MLX5_SET(wq, wq, log_reservation_size,
				 mlx5e_shampo_get_log_rsrv_size(mdev, params));
			MLX5_SET(wq, wq,
				 log_max_num_of_packets_per_reservation,
				 mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
			MLX5_SET(wq, wq, log_headers_entry_size,
				 mlx5e_shampo_get_log_hd_entry_size(mdev, params));
			MLX5_SET(rqc, rqc, reservation_timeout,
				 params->packet_merge.timeout);
			MLX5_SET(rqc, rqc, shampo_match_criteria_type,
				 params->packet_merge.shampo.match_criteria_type);
			MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
				 params->packet_merge.shampo.alignment_granularity);
		}
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		if (err)
			return err;
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    !!MLX5_IPSEC_DEV(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

/* This function calculates the maximum number of header entries that are needed
 * per WQE. The formula is based on the size of the reservations and the
 * restriction that the maximum number of packets per reservation is equal to
 * the maximum number of headers per reservation.
 */
u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    struct mlx5e_rq_param *rq_param)
{
	int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
	u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
	int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
	u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
	int wqe_size = BIT(log_stride_sz) * num_strides;
	u32 hd_per_wqe;

	/* Assumption: hd_per_wqe % 8 == 0. */
	hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
	mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
		      __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);

	return hd_per_wqe;
}

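/* Continuing the illustrative numbers used above: a 256 KB WQE split into
 * 64 KB reservations gives 4 reservations per WQE; with up to 64 packets per
 * reservation this yields 4 * 64 = 256 header entries per WQE.
 */
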
/* This function calculates the maximum number of header entries that are needed
 * for the WQ. This value is used to allocate the header buffer in HW, thus
 * it must be a power of 2.
 */
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params,
			   struct mlx5e_rq_param *rq_param)
{
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 hd_per_wqe, hd_per_wq;

	hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);

	return hd_per_wq;
}

static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *rq_param)
{
	int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
	void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
	int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
	u32 wqebbs;

	max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
	max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
	max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
	rest = max_hd_per_wqe % max_klm_per_umr;
	wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
	if (rest)
		wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
	wqebbs *= wq_size;

	return wqebbs;
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	u32 wqebbs;

	/* MLX5_WQ_TYPE_CYCLIC */
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
	if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
		wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
	return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}

static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_accel_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
	param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}