1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
8 /* Minimum supported BW share value by the HW is 1 Mbit/sec */
9 #define MLX5_MIN_BW_SHARE 1
/* Map a min-rate (Mbit/sec) to a TSAR bw_share weight: divide the rate by
 * @divider (rounding up), then clamp to [MLX5_MIN_BW_SHARE, limit] so the
 * result always fits what the firmware accepts.
 */
11 #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
12 min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit)
/* A devlink rate group: a TSAR scheduling node under the root TSAR that
 * vports can be attached to for shared min/max rate enforcement.
 * NOTE(review): other fields of this struct are elided in this excerpt.
 */
14 struct mlx5_esw_rate_group {
	/* Membership in esw->qos.groups */
19 struct list_head list;
/* Program @max_rate/@bw_share into @sched_ctx and issue a modify command
 * for scheduling element @tsar_ix under parent @parent_ix on the E-Switch
 * scheduling hierarchy.  Returns 0 or a negative errno from the command.
 * NOTE(review): some body lines are elided in this excerpt (the capability
 * check presumably bails out early — confirm against the full source).
 */
22 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
23 u32 parent_ix, u32 tsar_ix,
24 u32 max_rate, u32 bw_share)
/* E-Switch scheduling requires both the general QoS cap and esw_scheduling. */
28 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
31 MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
32 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
33 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
/* Only the bandwidth-related fields of the element are modified. */
34 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
35 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
37 return mlx5_modify_scheduling_element_cmd(dev,
38 SCHEDULING_HIERARCHY_E_SWITCH,
/* Apply @max_rate and @bw_share to @group's TSAR element; reports failure
 * to userspace through @extack.
 */
44 static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
45 u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
47 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
48 struct mlx5_core_dev *dev = esw->dev;
/* Group TSARs are parented to the root TSAR. */
51 err = esw_qos_tsar_config(dev, sched_ctx,
52 esw->qos.root_tsar_ix, group->tsar_ix,
55 NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
/* Apply @max_rate and @bw_share to @vport's scheduling element.  The
 * element stays parented to the vport's group TSAR, or to the root TSAR
 * when the vport has no group.  Errors are reported via esw_warn and
 * @extack.
 */
60 static int esw_qos_vport_config(struct mlx5_eswitch *esw,
61 struct mlx5_vport *vport,
62 u32 max_rate, u32 bw_share,
63 struct netlink_ext_ack *extack)
65 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
66 struct mlx5_esw_rate_group *group = vport->qos.group;
67 struct mlx5_core_dev *dev = esw->dev;
/* Nothing to modify if QoS was never enabled on this vport. */
72 if (!vport->qos.enabled)
/* Ungrouped vports hang directly off the root TSAR. */
75 parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
76 MLX5_SET(scheduling_context, sched_ctx, element_type,
77 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
78 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
80 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
82 err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
86 "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
88 NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed");
/* Compute the divider used to turn min-rates into bw_share weights.
 * At group level it scans all groups' min_rate; otherwise it scans the
 * vports belonging to @group.  The divider is the largest guarantee
 * scaled down by the FW's max bw_share, never less than 1.
 * NOTE(review): control-flow lines (braces, continue/return, the
 * group_level branch) are elided in this excerpt.
 */
95 static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
96 struct mlx5_esw_rate_group *group,
99 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
100 struct mlx5_vport *evport;
101 u32 max_guarantee = 0;
/* Group-level scan: find the largest group min_rate. */
105 struct mlx5_esw_rate_group *group;
107 list_for_each_entry(group, &esw->qos.groups, list) {
108 if (group->min_rate < max_guarantee)
110 max_guarantee = group->min_rate;
/* Vport-level scan: find the largest min_rate among @group's members. */
113 mlx5_esw_for_each_vport(esw, i, evport) {
114 if (!evport->enabled || !evport->qos.enabled ||
115 evport->qos.group != group || evport->qos.min_rate < max_guarantee)
117 max_guarantee = evport->qos.min_rate;
122 return max_t(u32, max_guarantee / fw_max_bw_share, 1);
124 /* If vports min rate divider is 0 but their group has bw_share configured, then
125 * need to set bw_share for vports to minimal value.
127 if (!group_level && !max_guarantee && group->bw_share)
/* Convert @min_rate to a bw_share weight bounded by the FW maximum.
 * NOTE(review): handling for divider == 0 appears elided in this excerpt —
 * confirm against the full source before relying on it.
 */
132 static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max)
135 return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max);
/* Re-derive and program bw_share for every enabled vport in @group after a
 * min_rate change, skipping vports whose weight is already up to date.
 * The new weight is cached in vport->qos.bw_share on success.
 */
140 static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw,
141 struct mlx5_esw_rate_group *group,
142 struct netlink_ext_ack *extack)
144 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
145 u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false);
146 struct mlx5_vport *evport;
151 mlx5_esw_for_each_vport(esw, i, evport) {
/* Only touch enabled members of this group. */
152 if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group)
154 bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share);
/* Skip the FW round-trip when the weight is unchanged. */
156 if (bw_share == evport->qos.bw_share)
159 err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack);
163 evport->qos.bw_share = bw_share;
/* Re-derive and program bw_share for every rate group using @divider, then
 * re-normalize each updated group's member vports so they inherit a valid
 * weight.  Cached in group->bw_share on success.
 */
169 static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider,
170 struct netlink_ext_ack *extack)
172 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
173 struct mlx5_esw_rate_group *group;
177 list_for_each_entry(group, &esw->qos.groups, list) {
178 bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share);
/* Nothing to do when the group's weight is unchanged. */
180 if (bw_share == group->bw_share)
183 err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack);
187 group->bw_share = bw_share;
189 /* All the group's vports need to be set with default bw_share
190 * to enable them with QOS
192 err = esw_qos_normalize_vports_min_rate(esw, group, extack);
/* Set @evport's guaranteed min rate (Mbit/sec) and re-normalize its group.
 * Caller must hold esw->state_lock.  On normalization failure the previous
 * min_rate is restored.  A non-zero rate requires FW esw_bw_share support.
 */
201 int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw,
202 struct mlx5_vport *evport,
204 struct netlink_ext_ack *extack)
206 u32 fw_max_bw_share, previous_min_rate;
207 bool min_rate_supported;
210 lockdep_assert_held(&esw->state_lock);
211 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
212 min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
213 fw_max_bw_share >= MLX5_MIN_BW_SHARE;
214 if (min_rate && !min_rate_supported)
/* No-op when the requested rate is already in effect. */
216 if (min_rate == evport->qos.min_rate)
219 previous_min_rate = evport->qos.min_rate;
220 evport->qos.min_rate = min_rate;
221 err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack);
/* Roll back the cached value if re-normalization failed. */
223 evport->qos.min_rate = previous_min_rate;
/* Set @evport's max (ceiling) rate.  Caller must hold esw->state_lock.
 * A max_rate of 0 means "unlimited"; an unlimited vport in a rate-limited
 * group is programmed with the group's limit instead.
 */
228 int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw,
229 struct mlx5_vport *evport,
231 struct netlink_ext_ack *extack)
233 u32 act_max_rate = max_rate;
234 bool max_rate_supported;
237 lockdep_assert_held(&esw->state_lock);
238 max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
240 if (max_rate && !max_rate_supported)
/* No-op when the requested limit is already in effect. */
242 if (max_rate == evport->qos.max_rate)
245 /* If parent group has rate limit need to set to group
246 * value when new max rate is 0.
248 if (evport->qos.group && !max_rate)
249 act_max_rate = evport->qos.group->max_rate;
251 err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack);
/* Cache the user-requested value (0 = unlimited), not the effective one. */
254 evport->qos.max_rate = max_rate;
/* Set @group's guaranteed min rate and re-normalize all groups.  On
 * failure the previous min_rate is restored and a second normalization
 * pass attempts to put the hardware back into the prior state.
 */
259 static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group,
260 u32 min_rate, struct netlink_ext_ack *extack)
262 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
263 struct mlx5_core_dev *dev = esw->dev;
264 u32 previous_min_rate, divider;
/* Group min-rate requires FW bw_share support with a usable max weight. */
267 if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE))
270 if (min_rate == group->min_rate)
273 previous_min_rate = group->min_rate;
274 group->min_rate = min_rate;
275 divider = esw_qos_calculate_min_rate_divider(esw, group, true);
276 err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
/* Roll back and try to restore the previous hardware configuration. */
278 group->min_rate = previous_min_rate;
279 NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed");
281 /* Attempt restoring previous configuration */
282 divider = esw_qos_calculate_min_rate_divider(esw, group, true);
283 if (esw_qos_normalize_groups_min_rate(esw, divider, extack))
284 NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
/* Set @group's max (ceiling) rate on its TSAR, then propagate the new
 * limit to every member vport that has no explicit limit of its own
 * (vport->qos.max_rate == 0), so the group cap applies to them too.
 */
290 static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
291 struct mlx5_esw_rate_group *group,
292 u32 max_rate, struct netlink_ext_ack *extack)
294 struct mlx5_vport *vport;
298 if (group->max_rate == max_rate)
301 err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack);
305 group->max_rate = max_rate;
307 /* Any unlimited vports in the group should be set
308 * with the value of the group.
310 mlx5_esw_for_each_vport(esw, i, vport) {
311 if (!vport->enabled || !vport->qos.enabled ||
312 vport->qos.group != group || vport->qos.max_rate)
315 err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack);
317 NL_SET_ERR_MSG_MOD(extack,
318 "E-Switch vport implicit rate limit setting failed");
/* Create the FW scheduling element for @vport under its group's TSAR (or
 * the root TSAR if ungrouped), with the given rate limit and weight.  On
 * success the new element index is stored in vport->qos.esw_tsar_ix.
 */
324 static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
325 struct mlx5_vport *vport,
326 u32 max_rate, u32 bw_share)
328 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
329 struct mlx5_esw_rate_group *group = vport->qos.group;
330 struct mlx5_core_dev *dev = esw->dev;
/* Ungrouped vports hang directly off the root TSAR. */
335 parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
336 MLX5_SET(scheduling_context, sched_ctx, element_type,
337 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
338 vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
339 MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
340 MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix);
341 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
342 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
344 err = mlx5_create_scheduling_element_cmd(dev,
345 SCHEDULING_HIERARCHY_E_SWITCH,
347 &vport->qos.esw_tsar_ix);
349 esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
/* Move @vport from @curr_group to @new_group by destroying its scheduling
 * element and recreating it under the new parent (FW elements cannot be
 * reparented in place).  On failure, recreate under @curr_group to restore
 * the previous state.
 */
357 static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw,
358 struct mlx5_vport *vport,
359 struct mlx5_esw_rate_group *curr_group,
360 struct mlx5_esw_rate_group *new_group,
361 struct netlink_ext_ack *extack)
366 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
367 SCHEDULING_HIERARCHY_E_SWITCH,
368 vport->qos.esw_tsar_ix);
370 NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed");
374 vport->qos.group = new_group;
/* An unlimited vport (max_rate == 0) inherits the new group's limit. */
375 max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;
377 /* If vport is unlimited, we set the group's value.
378 * Therefore, if the group is limited it will apply to
379 * the vport as well and if not, vport will remain unlimited.
381 err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share);
383 NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed.");
/* Error path: re-attach the vport to its original group. */
390 vport->qos.group = curr_group;
391 max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
392 if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share))
393 esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
/* Attach @vport to @group (NULL means the default group0).  If the vport
 * actually moves, the bw_share weights of both the old and new groups are
 * re-normalized since their membership changed.
 */
399 static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
400 struct mlx5_vport *vport,
401 struct mlx5_esw_rate_group *group,
402 struct netlink_ext_ack *extack)
404 struct mlx5_esw_rate_group *new_group, *curr_group;
410 curr_group = vport->qos.group;
/* NULL group means fall back to the implicit default group. */
411 new_group = group ?: esw->qos.group0;
412 if (curr_group == new_group)
415 err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack);
419 /* Recalculate bw share weights of old and new groups */
420 if (vport->qos.bw_share) {
421 esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
422 esw_qos_normalize_vports_min_rate(esw, new_group, extack);
/* Allocate a new rate group: create its TSAR under the root, add it to the
 * esw->qos.groups list, and re-normalize all group weights.  Returns the
 * group or an ERR_PTR.  The error path (lines partly elided here) unlinks
 * the group and destroys its TSAR.
 */
428 static struct mlx5_esw_rate_group *
429 esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
431 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
432 struct mlx5_esw_rate_group *group;
/* Nested TSARs require FW support for scheduling depth > 1. */
436 if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth))
437 return ERR_PTR(-EOPNOTSUPP);
439 group = kzalloc(sizeof(*group), GFP_KERNEL);
441 return ERR_PTR(-ENOMEM);
443 MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
444 esw->qos.root_tsar_ix);
445 err = mlx5_create_scheduling_element_cmd(esw->dev,
446 SCHEDULING_HIERARCHY_E_SWITCH,
450 NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
454 list_add_tail(&group->list, &esw->qos.groups);
/* A new group may lower the global divider — re-weight all groups. */
456 divider = esw_qos_calculate_min_rate_divider(esw, group, true);
458 err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
460 NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
/* Unwind: remove from the list and destroy the group TSAR. */
468 list_del(&group->list);
469 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
470 SCHEDULING_HIERARCHY_E_SWITCH,
473 NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
/* Tear down @group: unlink it, re-normalize the remaining groups (NULL
 * group = recompute divider over all groups), then destroy its TSAR.
 * NOTE(review): the kfree of @group is elided in this excerpt — confirm.
 */
479 static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
480 struct mlx5_esw_rate_group *group,
481 struct netlink_ext_ack *extack)
486 list_del(&group->list);
488 divider = esw_qos_calculate_min_rate_divider(esw, NULL, true);
489 err = esw_qos_normalize_groups_min_rate(esw, divider, extack);
491 NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed");
493 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
494 SCHEDULING_HIERARCHY_E_SWITCH,
497 NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
/* Return true if the FW advertises support for scheduling element @type on
 * the E-Switch hierarchy, by testing the esw_element_type capability mask.
 * (The "TASR" spelling follows the firmware interface constant name.)
 */
503 static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
506 case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
507 return MLX5_CAP_QOS(dev, esw_element_type) &
508 ELEMENT_TYPE_CAP_MASK_TASR;
509 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
510 return MLX5_CAP_QOS(dev, esw_element_type) &
511 ELEMENT_TYPE_CAP_MASK_VPORT;
512 case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
513 return MLX5_CAP_QOS(dev, esw_element_type) &
514 ELEMENT_TYPE_CAP_MASK_VPORT_TC;
515 case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
516 return MLX5_CAP_QOS(dev, esw_element_type) &
517 ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
/* Initialize E-Switch QoS: create the root DWRR TSAR and, when nested
 * scheduling is supported, the default rate group (group0).  Silently a
 * no-op when QoS/TSAR support is absent or QoS is already enabled.  On
 * group0 failure the root TSAR is destroyed again (unwind path below).
 */
522 void mlx5_esw_qos_create(struct mlx5_eswitch *esw)
524 u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
525 struct mlx5_core_dev *dev = esw->dev;
529 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
532 if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
535 mutex_lock(&esw->state_lock);
536 if (esw->qos.enabled)
539 MLX5_SET(scheduling_context, tsar_ctx, element_type,
540 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
/* Root TSAR runs deficit-weighted round robin among its children. */
542 attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
543 *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
545 err = mlx5_create_scheduling_element_cmd(dev,
546 SCHEDULING_HIERARCHY_E_SWITCH,
548 &esw->qos.root_tsar_ix);
550 esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
554 INIT_LIST_HEAD(&esw->qos.groups);
555 if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
556 esw->qos.group0 = esw_qos_create_rate_group(esw, NULL);
557 if (IS_ERR(esw->qos.group0)) {
558 esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
559 PTR_ERR(esw->qos.group0));
563 esw->qos.enabled = true;
565 mutex_unlock(&esw->state_lock);
/* Unwind: destroy the root TSAR if group0 creation failed. */
569 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
570 SCHEDULING_HIERARCHY_E_SWITCH,
571 esw->qos.root_tsar_ix);
573 esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
574 mutex_unlock(&esw->state_lock);
/* Tear down E-Switch QoS: drop devlink rate nodes first (they reference
 * our groups), then destroy group0 and the root TSAR under state_lock,
 * and mark QoS disabled.
 */
577 void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw)
579 struct devlink *devlink = priv_to_devlink(esw->dev);
582 devlink_rate_nodes_destroy(devlink);
583 mutex_lock(&esw->state_lock);
584 if (!esw->qos.enabled)
588 esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL);
590 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
591 SCHEDULING_HIERARCHY_E_SWITCH,
592 esw->qos.root_tsar_ix);
594 esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err);
596 esw->qos.enabled = false;
598 mutex_unlock(&esw->state_lock);
/* Enable QoS for @vport: place it in the default group0 and create its
 * scheduling element with the given initial rate and weight.  Caller must
 * hold esw->state_lock; no-op when QoS is off or the vport is already on.
 */
601 int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
602 u32 max_rate, u32 bw_share)
606 lockdep_assert_held(&esw->state_lock);
607 if (!esw->qos.enabled)
610 if (vport->qos.enabled)
613 vport->qos.group = esw->qos.group0;
615 err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share);
617 vport->qos.enabled = true;
/* Disable QoS for @vport by destroying its scheduling element.  Caller
 * must hold esw->state_lock.  Warns if the vport is still attached to a
 * non-default group — it should be moved back to group0 first.
 */
622 void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
626 lockdep_assert_held(&esw->state_lock);
627 if (!esw->qos.enabled || !vport->qos.enabled)
629 WARN(vport->qos.group && vport->qos.group != esw->qos.group0,
630 "Disabling QoS on port before detaching it from group");
632 err = mlx5_destroy_scheduling_element_cmd(esw->dev,
633 SCHEDULING_HIERARCHY_E_SWITCH,
634 vport->qos.esw_tsar_ix);
636 esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
639 vport->qos.enabled = false;
/* Update only the max_average_bw (rate limit, Mbit/sec) of @vport_num's
 * existing scheduling element.  Does not touch the cached qos state and
 * does not normalize weights — a low-level rate override.
 */
642 int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
644 u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
645 struct mlx5_vport *vport;
648 vport = mlx5_eswitch_get_vport(esw, vport_num);
650 return PTR_ERR(vport);
652 if (!vport->qos.enabled)
655 MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
/* Only the max_average_bw field is marked for modification. */
656 bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
658 return mlx5_modify_scheduling_element_cmd(esw->dev,
659 SCHEDULING_HIERARCHY_E_SWITCH,
661 vport->qos.esw_tsar_ix,
665 #define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */
667 /* Converts bytes per second value passed in a pointer into megabits per
668 * second, rewriting last. If converted rate exceed link speed or is not a
669 * fraction of Mbps - returns error.
/* @name identifies the attribute ("tx_share"/"tx_max") in error messages.
 * NOTE(review): 'reminder' is a pre-existing misspelling of 'remainder';
 * renaming would be a code change, so it is left as-is here.
 */
671 static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
672 u64 *rate, struct netlink_ext_ack *extack)
674 u32 link_speed_max, reminder;
678 err = mlx5e_port_max_linkspeed(mdev, &link_speed_max);
680 NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
/* Rate must be an exact multiple of 1 Mbit/sec (125000 Bps). */
684 value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder);
686 pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
688 NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");
692 if (value > link_speed_max) {
693 pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
694 name, value, link_speed_max);
695 NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
703 /* Eswitch devlink rate API */
/* devlink-rate leaf .tx_share op: convert the Bps value to Mbps and apply
 * it as the vport's min (guaranteed) rate under esw->state_lock.
 */
705 int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
706 u64 tx_share, struct netlink_ext_ack *extack)
708 struct mlx5_vport *vport = priv;
709 struct mlx5_eswitch *esw;
712 esw = vport->dev->priv.eswitch;
713 if (!mlx5_esw_allowed(esw))
716 err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_share", &tx_share, extack);
720 mutex_lock(&esw->state_lock);
721 err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack);
722 mutex_unlock(&esw->state_lock);
/* devlink-rate leaf .tx_max op: convert the Bps value to Mbps and apply it
 * as the vport's max (ceiling) rate under esw->state_lock.
 */
726 int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
727 u64 tx_max, struct netlink_ext_ack *extack)
729 struct mlx5_vport *vport = priv;
730 struct mlx5_eswitch *esw;
733 esw = vport->dev->priv.eswitch;
734 if (!mlx5_esw_allowed(esw))
737 err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_max", &tx_max, extack);
741 mutex_lock(&esw->state_lock);
742 err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack);
743 mutex_unlock(&esw->state_lock);
/* devlink-rate node .tx_share op: convert the Bps value to Mbps and apply
 * it as the rate group's min rate under esw->state_lock.
 */
747 int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
748 u64 tx_share, struct netlink_ext_ack *extack)
750 struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
751 struct mlx5_eswitch *esw = dev->priv.eswitch;
752 struct mlx5_esw_rate_group *group = priv;
755 err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack);
759 mutex_lock(&esw->state_lock);
760 err = esw_qos_set_group_min_rate(esw, group, tx_share, extack);
761 mutex_unlock(&esw->state_lock);
/* devlink-rate node .tx_max op: convert the Bps value to Mbps and apply it
 * as the rate group's max rate under esw->state_lock.
 */
765 int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
766 u64 tx_max, struct netlink_ext_ack *extack)
768 struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink);
769 struct mlx5_eswitch *esw = dev->priv.eswitch;
770 struct mlx5_esw_rate_group *group = priv;
773 err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack);
777 mutex_lock(&esw->state_lock);
778 err = esw_qos_set_group_max_rate(esw, group, tx_max, extack);
779 mutex_unlock(&esw->state_lock);
/* devlink-rate .node_new op: create a rate group and hand it back through
 * *priv.  Only permitted in switchdev (offloads) mode; serialized by
 * esw->state_lock.  (*priv assignment is among the lines elided here.)
 */
783 int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
784 struct netlink_ext_ack *extack)
786 struct mlx5_esw_rate_group *group;
787 struct mlx5_eswitch *esw;
790 esw = mlx5_devlink_eswitch_get(rate_node->devlink);
794 mutex_lock(&esw->state_lock);
795 if (esw->mode != MLX5_ESWITCH_OFFLOADS) {
796 NL_SET_ERR_MSG_MOD(extack,
797 "Rate node creation supported only in switchdev mode");
802 group = esw_qos_create_rate_group(esw, extack);
804 err = PTR_ERR(group);
810 mutex_unlock(&esw->state_lock);
/* devlink-rate .node_del op: destroy the rate group stored in @priv,
 * serialized by esw->state_lock.
 */
814 int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
815 struct netlink_ext_ack *extack)
817 struct mlx5_esw_rate_group *group = priv;
818 struct mlx5_eswitch *esw;
821 esw = mlx5_devlink_eswitch_get(rate_node->devlink);
825 mutex_lock(&esw->state_lock);
826 err = esw_qos_destroy_rate_group(esw, group, extack);
827 mutex_unlock(&esw->state_lock);
/* Public wrapper: attach @vport to @group (NULL = default group0) while
 * holding esw->state_lock.
 */
831 int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
832 struct mlx5_vport *vport,
833 struct mlx5_esw_rate_group *group,
834 struct netlink_ext_ack *extack)
838 mutex_lock(&esw->state_lock);
839 err = esw_qos_vport_update_group(esw, vport, group, extack);
840 mutex_unlock(&esw->state_lock);
/* devlink-rate .rate_leaf_parent_set op: reparent a vport leaf.  A NULL
 * parent detaches the vport (back to the default group); otherwise attach
 * it to the parent node's group.  (The group = parent_priv assignment is
 * among the lines elided in this excerpt.)
 */
844 int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate,
845 struct devlink_rate *parent,
846 void *priv, void *parent_priv,
847 struct netlink_ext_ack *extack)
849 struct mlx5_esw_rate_group *group;
850 struct mlx5_vport *vport = priv;
853 return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch,
854 vport, NULL, extack);
857 return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack);