// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "en/params.h"
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	return params->xdp_prog || xsk;
}

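/* RX headroom reserved in front of each received packet. XSK sockets dictate
 * their own headroom; otherwise it is NET_IP_ALIGN plus either the XDP or the
 * regular driver headroom, depending on whether an XDP program is attached.
 */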
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk)
{
	u16 headroom;

	if (xsk)
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;

	return headroom;
}

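/* Smallest usable fragment: the headroom plus the worst-case frame size
 * implied by the configured MTU.
 */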
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows to treat XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by using
	 * the fact there are no multiple packets per "page" (which is a frame).
	 * The latter is important, because frames may come in a random order,
	 * and we will have trouble assembling a real page of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);

	return frag_sz;
}

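/* Log2 of the number of packets that fit in one multi-packet WQE. As an
 * illustration (the exact macro values may differ by kernel version): with
 * MLX5_MPWRQ_LOG_WQE_SZ of 18 (a 256 KB WQE) and page-sized 4 KB fragments,
 * this returns 18 - 12 = 6, i.e. 64 packets per MPWQE.
 */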
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));

	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

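/* Sanity-check MPWQE stride parameters: the strides must exactly cover the
 * WQE, and both the stride size and the stride count must stay within the
 * device limits. The ext_stride_num_range capability lowers the minimum
 * allowed number of strides.
 */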
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	u8 log_stride_sz;
	u8 log_num_strides;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

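/* The striding RQ size is measured in MPWQEs, while log_rq_mtu_frames is
 * measured in MTU-sized frames, so convert by dividing out the packets that
 * fit in one WQE (a subtraction in log space), clamped to the minimum size.
 */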
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
			       struct mlx5e_xsk_param *xsk)
{
	u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params, xsk);

	/* Numbers are unsigned, don't subtract to avoid underflow. */
	if (params->log_rq_mtu_frames <
	    log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;

	return params->log_rq_mtu_frames - log_pkts_per_wqe;
}

u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}

u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params,
				   struct mlx5e_xsk_param *xsk)
{
	return MLX5_MPWRQ_LOG_WQE_SZ -
		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}

u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_xsk_param *xsk)
{
	bool is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
		mlx5e_rx_is_linear_skb(params, xsk) :
		mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);

	return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}

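/* Stop room is SQ space kept unused on purpose: the queue is declared full
 * while this many WQEBBs still remain, so that a worst-case WQE posted after
 * the fullness check is guaranteed to fit.
 */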
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	u16 stop_room;

	stop_room = mlx5e_tls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
	if (is_mpwqe)
		/* An MPWQE can take up to the maximum-sized WQE, and in
		 * addition all the normal stop room can be taken if a new
		 * packet breaks the active MPWQE session and allocates its
		 * WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);

	return stop_room;
}

int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	size_t sq_size = 1 << params->log_sq_size;
	u16 stop_room;

	stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	if (stop_room >= sq_size) {
		mlx5_core_err(mdev, "Stop room %u is bigger than the SQ size %zu\n",
			      stop_room, sq_size);
		return -EINVAL;
	}

	return 0;
}

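/* Static CQ moderation defaults, used when adaptive moderation (DIM) is
 * disabled. CQE-based period modes get their own usec default, as the
 * moderation timer restarts on every completion rather than counting from
 * the event.
 */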
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
	struct dim_cq_moder moder = {};

	moder.cq_period_mode = cq_period_mode;
	moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	return moder;
}

static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
	return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
		DIM_CQ_PERIOD_MODE_START_FROM_CQE :
		DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->tx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
	} else {
		params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
	}
}

void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
	if (params->rx_dim_enabled) {
		u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);

		params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
	} else {
		params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
	}
}

void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_tx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
			params->tx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	mlx5e_reset_rx_moderation(params, cq_period_mode);
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_moderation.cq_period_mode ==
				MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}

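/* Detect setups where the PCI link is the bottleneck. Both speeds are
 * compared in Mbps. Illustrative numbers: a 100 Gbps port behind a PCIe Gen3
 * x4 slot that can deliver only about 31.5 Gbps gives 100000 > 2 * 31504, so
 * the heuristic fires and the driver avoids defaulting to striding RQ.
 */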
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
	u32 link_speed = 0;
	u32 pci_bw = 0;

	mlx5e_port_max_linkspeed(mdev, &link_speed);
	pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
	mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
			   link_speed, pci_bw);

#define MLX5E_SLOW_PCI_RATIO (2)

	return link_speed && pci_bw &&
	       link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	}

	return true;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
			       struct mlx5e_params *params)
{
	params->log_rq_mtu_frames = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		       BIT(mlx5e_mpwqe_get_log_rq_size(params, NULL)) :
		       BIT(params->log_rq_mtu_frames),
		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}

void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
			   struct mlx5e_params *params)
{
	/* Prefer Striding RQ, unless any of the following holds:
	 * - Striding RQ configuration is not possible/supported.
	 * - Slow PCI heuristic.
	 * - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
	 *
	 * No XSK params: checking the availability of striding RQ in general.
	 */
	if (!slow_pci_heuristic(mdev) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);

	mlx5e_set_rq_type(mdev, params);
	mlx5e_init_rq_type_params(mdev, params);
}

/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
{
	*ccp = (struct mlx5e_create_cq_param) {
		.napi = &c->napi,
		.ch_stats = c->stats,
		.node = cpu_to_node(c->cpu),
		.ix = c->ix,
	};
}

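/* Lay out the WQE fragments for a legacy (cyclic) RQ. Linear SKBs use a
 * single fragment; otherwise the packet is split into up to
 * MLX5E_MAX_RX_FRAGS fragments of at most frag_size_max bytes each. For
 * example (illustrative, on 4 KB pages), a ~9000-byte HW MTU becomes three
 * 2048-byte fragments plus a final fragment carrying the remainder.
 */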
#define DEFAULT_FRAG_SIZE (2048)

static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
				      struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk,
				      struct mlx5e_rq_frags_info *info)
{
	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	int frag_size_max = DEFAULT_FRAG_SIZE;
	u32 buf_size = 0;
	int i;

	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
		info->num_frags = 1;
		info->wqe_bulk = PAGE_SIZE / frag_stride;
		goto out;
	}

	if (byte_count > PAGE_SIZE +
	    (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max)
		frag_size_max = PAGE_SIZE;

	i = 0;
	while (buf_size < byte_count) {
		int frag_size = byte_count - buf_size;

		if (i < MLX5E_MAX_RX_FRAGS - 1)
			frag_size = min(frag_size, frag_size_max);

		info->arr[i].frag_size = frag_size;
		info->arr[i].frag_stride = roundup_pow_of_two(frag_size);

		buf_size += frag_size;
		i++;
	}
	info->num_frags = i;
	/* number of different wqes sharing a page */
	info->wqe_bulk = 1 + (info->num_frags % 2);

out:
	info->wqe_bulk = max_t(u8, info->wqe_bulk, 8);
	info->log_num_frags = order_base_2(info->num_frags);
}

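/* Log2 size of one RX WQE in the descriptor ring: the fixed per-WQE part for
 * the given WQ type plus one data segment per scatter entry, rounded up to a
 * power of two.
 */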
static u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
{
	int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs;

	switch (wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		sz += sizeof(struct mlx5e_rx_wqe_ll);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		sz += sizeof(struct mlx5e_rx_wqe_cyc);
	}

	return order_base_2(sz);
}

static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
	if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}

static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
				    struct mlx5e_params *params,
				    struct mlx5e_xsk_param *xsk,
				    struct mlx5e_cq_param *param)
{
	bool hw_stridx = false;
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
			mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
		break;
	default: /* MLX5_WQ_TYPE_CYCLIC */
		log_cq_size = params->log_rq_mtu_frames;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

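/* Pick the end-padding mode for RQ WQEs: per the condition below, padding is
 * dropped only when both the PCI subsystem and the device support
 * relaxed-ordering writes and LRO is enabled (presumably because LRO sessions
 * make the alignment padding unnecessary); otherwise the packet end is
 * aligned as usual.
 */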
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		  MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && params->lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}

int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
			 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
		ndsegs = param->frags_info.num_frags;
	}

	MLX5_SET(wq, wq, wq_type, params->rq_wq_type);
	MLX5_SET(wq, wq, end_padding_mode, rq_end_pad_mode(mdev, params));
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
			       u16 q_counter,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, log_wq_stride,
		 mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
	MLX5_SET(rqc, rqc, counter_set_id, q_counter);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(mdev, param);
	param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}

void mlx5e_build_sq_param_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}

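/* allow_swp enables the software parser: the driver supplies header offsets
 * for packets whose inner headers the HW cannot parse on its own. Per the
 * condition below, it is needed for GENEVE TX offloads and for IPsec-capable
 * devices.
 */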
void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
			  struct mlx5e_params *params,
			  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
	bool allow_swp;

	allow_swp = mlx5_geneve_tx_allowed(mdev) ||
		    !!MLX5_IPSEC_DEV(mdev);
	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, allow_swp);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
	param->stop_room = mlx5e_calc_sq_stop_room(mdev, params);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

static void mlx5e_build_ico_cq_param(struct mlx5_core_dev *mdev,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(mdev, param);

	param->cq_period_mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}

static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
{
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	return MLX5_GET(wq, wq, log_wq_sz);
}

static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
				      struct mlx5e_rq_param *rqp)
{
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
			     order_base_2(MLX5E_UMR_WQEBBS) +
			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
	default: /* MLX5_WQ_TYPE_CYCLIC */
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	}
}

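/* The async ICOSQ carries kTLS RX resync work (static/progress params), so
 * it only needs the full default size when kTLS RX is actually supported;
 * otherwise the minimal size is enough.
 */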
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
	if (mlx5e_accel_is_ktls_rx(mdev))
		return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
}

static void mlx5e_build_icosq_param(struct mlx5_core_dev *mdev,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
					  u8 log_wq_size,
					  struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
	param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
	if (param->is_tls)
		param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
}

void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(mdev, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);

	mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}