// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include "en.h"
#include "params.h"
#include "../qos.h"
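
/* HTB rates arrive from the stack in bytes per second; one Mbit/s is
 * 1000000 / 8 = 125000 bytes per second, so dividing by this constant
 * converts a byte rate into the Mbit/s units used below.
 */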
#define BYTES_IN_MBIT 125000

int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}
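
/* Returns one past the highest qid currently in use, i.e. the number of
 * leaf queues when the qid space has no gaps. find_last_bit() returns the
 * bitmap size when no bit is set, which maps to 0 here.
 */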
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
{
	int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));

	return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
}

/* Software representation of the QoS tree (internal to this file) */

static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
{
	int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
	int res;

	WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
	res = find_first_zero_bit(priv->htb.qos_used_qids, size);

	return res == size ? -ENOSPC : res;
}

struct mlx5e_qos_node {
	struct hlist_node hnode;
	struct rcu_head rcu;
	struct mlx5e_qos_node *parent;
	u64 rate;
	u32 bw_share;
	u32 max_average_bw;
	u32 hw_id;
	u32 classid; /* 16-bit, except root. */
	u16 qid;
};

#define MLX5E_QOS_QID_INNER 0xffff
#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
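
/* Inner nodes own no SQ: their qid is set to MLX5E_QOS_QID_INNER, and only
 * leaf nodes carry a real queue id. The root is keyed by a classid outside
 * the 16-bit range used by real classes.
 */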

static struct mlx5e_qos_node *
mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
			  struct mlx5e_qos_node *parent)
{
	struct mlx5e_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->parent = parent;

	node->qid = qid;
	__set_bit(qid, priv->htb.qos_used_qids);

	node->classid = classid;
	hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);

	mlx5e_update_tx_netdev_queues(priv);

	return node;
}

static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
{
	struct mlx5e_qos_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->qid = MLX5E_QOS_QID_INNER;
	node->classid = MLX5E_HTB_CLASSID_ROOT;
	hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);

	return node;
}

static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
{
	struct mlx5e_qos_node *node = NULL;

	hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
{
	struct mlx5e_qos_node *node = NULL;

	hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
		if (node->classid == classid)
			break;
	}

	return node;
}

static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
	hash_del_rcu(&node->hnode);
	if (node->qid != MLX5E_QOS_QID_INNER) {
		__clear_bit(node->qid, priv->htb.qos_used_qids);
		mlx5e_update_tx_netdev_queues(priv);
	}
	kfree_rcu(node, rcu);
}

/* TX datapath API */
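
/* QoS txqs are appended after all regular txqs (and the PTP txqs, when port
 * timestamping is enabled): for example, with 8 channels, 2 TCs and no PTP,
 * QoS qid 0 maps to txq index (8 + 0) * 2 + 0 = 16.
 */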
static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
	/* These channel params are safe to access from the datapath, because:
	 * 1. This function is called only after checking priv->htb.maj_id != 0,
	 *    and the number of queues can't change while HTB offload is active.
	 * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
	 *    mlx5e_select_queue to finish while holding priv->state_lock,
	 *    preventing other code from changing the number of queues.
	 */
	bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);

	return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
}

int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
{
	struct mlx5e_qos_node *node;
	u16 qid;
	int res;

	rcu_read_lock();

	node = mlx5e_sw_node_find_rcu(priv, classid);
	if (!node) {
		res = -ENOENT;
		goto out;
	}
	qid = READ_ONCE(node->qid);
	if (qid == MLX5E_QOS_QID_INNER) {
		res = -EINVAL;
		goto out;
	}
	res = mlx5e_qid_from_qos(&priv->channels, qid);

out:
	rcu_read_unlock();
	return res;
}
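
/* A QoS SQ with a given qid is striped across channels: it lives on channel
 * qid % num_channels, in slot qid / num_channels of that channel's qos_sqs
 * array.
 */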
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_channel *c;
	int ix;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	return mlx5e_state_dereference(priv, qos_sqs[qid]);
}

/* SQ lifecycle */

static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
			     struct mlx5e_qos_node *node)
{
	struct mlx5e_create_cq_param ccp = {};
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_sq_param param_sq;
	struct mlx5e_cq_param param_cq;
	int txq_ix, ix, qid, err = 0;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;

	params = &chs->params;

	txq_ix = mlx5e_qid_from_qos(chs, node->qid);

	WARN_ON(node->qid > priv->htb.max_qos_sqs);
	if (node->qid == priv->htb.max_qos_sqs) {
		struct mlx5e_sq_stats *stats, **stats_list = NULL;

		if (priv->htb.max_qos_sqs == 0) {
			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
					      sizeof(*stats_list), GFP_KERNEL);
			if (!stats_list)
				return -ENOMEM;
		}
		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
		if (!stats) {
			kvfree(stats_list);
			return -ENOMEM;
		}
		if (stats_list)
			WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
		WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
		/* Order max_qos_sqs increment after writing the array pointer.
		 * Pairs with smp_load_acquire in en_stats.c.
		 */
		smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
	}

	ix = node->qid % params->num_channels;
	qid = node->qid / params->num_channels;
	c = chs->c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return -ENOMEM;

	mlx5e_build_create_cq_param(&ccp, c);

	memset(&param_sq, 0, sizeof(param_sq));
	memset(&param_cq, 0, sizeof(param_cq));
	mlx5e_build_sq_param(priv, params, &param_sq);
	mlx5e_build_tx_cq_param(priv, params, &param_cq);
	err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
	if (err)
		goto err_free_sq;
	err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
			       &param_sq, sq, 0, node->hw_id, node->qid);
	if (err)
		goto err_close_cq;

	rcu_assign_pointer(qos_sqs[qid], sq);

	return 0;

err_close_cq:
	mlx5e_close_cq(&sq->cq);
err_free_sq:
	kfree(sq);
	return err;
}

static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
	struct mlx5e_txqsq *sq;

	sq = mlx5e_get_qos_sq(priv, node->qid);

	WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);

	/* Make the change to txq2sq visible before the queue is started.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();

	qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
	mlx5e_activate_txqsq(sq);
}

static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq *sq;

	sq = mlx5e_get_qos_sq(priv, qid);
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
	mlx5e_deactivate_txqsq(sq);

	/* The queue is disabled, no synchronization with datapath is needed. */
	priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
}

static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int ix;

	params = &priv->channels.params;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];
	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	synchronize_rcu(); /* Sync with NAPI. */

	mlx5e_close_txqsq(sq);
	mlx5e_close_cq(&sq->cq);
	kfree(sq);
}

void mlx5e_qos_close_queues(struct mlx5e_channel *c)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
	if (!qos_sqs)
		return;
	synchronize_rcu(); /* Sync with NAPI. */

	for (i = 0; i < c->qos_sqs_size; i++) {
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		mlx5e_close_txqsq(sq);
		mlx5e_close_cq(&sq->cq);
		kfree(sq);
	}

	kvfree(qos_sqs);
}

static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_close_queues(chs->c[i]);
}
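
/* Each channel gets a qos_sqs array sized DIV_ROUND_UP(max leaf nodes,
 * num channels), so the worst-case striping of qids across channels always
 * fits.
 */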
static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	u16 qos_sqs_size;
	int i;

	qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_txqsq **sqs;

		sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
		if (!sqs)
			goto err_free;

		WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
		smp_wmb(); /* Pairs with mlx5e_napi_poll. */
		rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
	}

	return 0;

err_free:
	while (--i >= 0) {
		struct mlx5e_txqsq **sqs;

		sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
					  lockdep_is_held(&priv->state_lock));

		synchronize_rcu(); /* Sync with NAPI. */
		kvfree(sqs);
	}

	return -ENOMEM;
}

int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	struct mlx5e_qos_node *node = NULL;
	int bkt, err;

	if (!priv->htb.maj_id)
		return 0;

	err = mlx5e_qos_alloc_queues(priv, chs);
	if (err)
		return err;

	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
		if (node->qid == MLX5E_QOS_QID_INNER)
			continue;
		err = mlx5e_open_qos_sq(priv, chs, node);
		if (err) {
			mlx5e_qos_close_all_queues(chs);
			return err;
		}
	}

	return 0;
}

void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
	struct mlx5e_qos_node *node = NULL;
	int bkt;

	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
		if (node->qid == MLX5E_QOS_QID_INNER)
			continue;
		mlx5e_activate_qos_sq(priv, node);
	}
}

void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
	struct mlx5e_params *params = &c->priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
	if (!qos_sqs)
		return;

	for (i = 0; i < c->qos_sqs_size; i++) {
		u16 qid = params->num_channels * i + c->ix;
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
		mlx5e_deactivate_txqsq(sq);

		/* The queue is disabled, no synchronization with datapath is needed. */
		c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
	}
}

static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_deactivate_queues(chs->c[i]);
}

/* HTB API */

int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
		       struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *root;
	bool opened;
	int err;

	qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);

	if (!mlx5_qos_is_supported(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
		return -EOPNOTSUPP;
	}

	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (opened) {
		err = mlx5e_qos_alloc_queues(priv, &priv->channels);
		if (err)
			return err;
	}

	root = mlx5e_sw_node_create_root(priv);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto err_free_queues;
	}

	err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
		goto err_sw_node_delete;
	}

	WRITE_ONCE(priv->htb.defcls, htb_defcls);
	/* Order maj_id after defcls - pairs with
	 * mlx5e_select_queue/mlx5e_select_htb_queues.
	 */
	smp_store_release(&priv->htb.maj_id, htb_maj_id);

	return 0;

err_sw_node_delete:
	mlx5e_sw_node_delete(priv, root);

err_free_queues:
	if (opened)
		mlx5e_qos_close_all_queues(&priv->channels);
	return err;
}

int mlx5e_htb_root_del(struct mlx5e_priv *priv)
{
	struct mlx5e_qos_node *root;
	int err;

	qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");

	WRITE_ONCE(priv->htb.maj_id, 0);
	synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */

	root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
	if (!root) {
		qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
		return -ENOENT;
	}
	err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
	if (err)
		qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
			root->hw_id, err);
	mlx5e_sw_node_delete(priv, root);

	mlx5e_qos_deactivate_all_queues(&priv->channels);
	mlx5e_qos_close_all_queues(&priv->channels);

	return err;
}
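
/* bw_share expresses the class rate as a percentage of the nearest ancestor
 * that has a ceil configured: share = (rate in Mbit/s) * 100 / parent ceil.
 * The result is clamped so that a non-zero rate gets at least 1, and
 * anything above 100% is encoded as 0.
 */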
static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
				  struct mlx5e_qos_node *parent, u32 *bw_share)
{
	u64 share = 0;

	while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
		parent = parent->parent;

	if (parent->max_average_bw)
		share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
				  parent->max_average_bw);
	else
		share = 101;

	*bw_share = share == 0 ? 1 : share > 100 ? 0 : share;

	qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
		rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);

	return 0;
}

static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
	*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);

	qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
		ceil, *max_average_bw);
}

int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
			       u32 parent_classid, u64 rate, u64 ceil,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *parent;
	int qid;
	int err;

	qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
		classid, parent_classid, rate, ceil);

	qid = mlx5e_find_unused_qos_qid(priv);
	if (qid < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
		return qid;
	}

	parent = mlx5e_sw_node_find(priv, parent_classid);
	if (!parent)
		return -EINVAL;

	node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
	if (IS_ERR(node))
		return PTR_ERR(node);

	node->rate = rate;
	mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
	mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);

	err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
					node->bw_share, node->max_average_bw,
					&node->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		mlx5e_sw_node_delete(priv, node);
		return err;
	}

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, node);
		}
	}

	return mlx5e_qid_from_qos(&priv->channels, node->qid);
}
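
/* Promoting a leaf to an inner node happens in three steps: a new inner
 * node is created in the firmware next to the old leaf, the software child
 * inherits the leaf's qid (so the SQ and the netdev queue stay in place),
 * and only then is the old leaf's firmware node destroyed and its hw_id
 * replaced. This ordering keeps a rollback path until the last fail point.
 */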
int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
			    u64 rate, u64 ceil, struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *child;
	int err, tmp_err;
	u32 new_hw_id;
	u16 qid;

	qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
		classid, child_classid, rate, ceil);

	node = mlx5e_sw_node_find(priv, classid);
	if (!node)
		return -ENOENT;

	err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
					 node->bw_share, node->max_average_bw,
					 &new_hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
		qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
			classid, err);
		return err;
	}

	/* Intentionally reuse the qid for the upcoming first child. */
	child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
	if (IS_ERR(child)) {
		err = PTR_ERR(child);
		goto err_destroy_hw_node;
	}

	child->rate = rate;
	mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
	mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);

	err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
					child->max_average_bw, &child->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		goto err_delete_sw_node;
	}

	/* No fail point. */

	qid = node->qid;
	/* Pairs with mlx5e_get_txq_by_classid. */
	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_deactivate_qos_sq(priv, qid);
		mlx5e_close_qos_sq(priv, qid);
	}

	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
	if (err) /* Not fatal. */
		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	node->hw_id = new_hw_id;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, child);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, child);
		}
	}

	return 0;

err_delete_sw_node:
	child->qid = MLX5E_QOS_QID_INNER;
	mlx5e_sw_node_delete(priv, child);

err_destroy_hw_node:
	tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
	if (tmp_err) /* Not fatal. */
		qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
			 new_hw_id, classid, tmp_err);
	return err;
}

static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_qos_node *node = NULL;
	int bkt;

	hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
		if (node->qid == qid)
			break;

	return node;
}

static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
	qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
	netdev_tx_reset_queue(txq);
	netif_tx_start_queue(txq);
}

static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (!qdisc)
		return;

	spin_lock_bh(qdisc_lock(qdisc));
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
}
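
/* The qid space must stay contiguous (see mlx5e_qos_cur_leaf_nodes), so
 * deleting a leaf in the middle moves the node with the highest qid into
 * the freed slot: its SQ is closed, its qdisc reset, and it is reopened
 * under the reused qid.
 */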
int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
		       u16 *new_qid, struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node;
	struct netdev_queue *txq;
	u16 qid, moved_qid;
	bool opened;
	int err;

	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);

	*old_qid = *new_qid = 0;

	node = mlx5e_sw_node_find(priv, classid);
	if (!node)
		return -ENOENT;

	/* Store qid for reuse. */
	qid = node->qid;

	opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (opened) {
		txq = netdev_get_tx_queue(priv->netdev,
					  mlx5e_qid_from_qos(&priv->channels, qid));
		mlx5e_deactivate_qos_sq(priv, qid);
		mlx5e_close_qos_sq(priv, qid);
	}

	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
	if (err) /* Not fatal. */
		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	mlx5e_sw_node_delete(priv, node);

	moved_qid = mlx5e_qos_cur_leaf_nodes(priv);

	if (moved_qid == 0) {
		/* The last QoS SQ was just destroyed. */
		if (opened)
			mlx5e_reactivate_qos_sq(priv, qid, txq);
		return 0;
	}
	moved_qid--;

	if (moved_qid < qid) {
		/* The highest QoS SQ was just destroyed. */
		WARN(moved_qid != qid - 1, "Gaps in queue numeration: destroyed queue %u, the highest queue is %u",
		     qid, moved_qid);
		if (opened)
			mlx5e_reactivate_qos_sq(priv, qid, txq);
		return 0;
	}

	WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
	qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);

	node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
	WARN(!node, "Could not find a node with qid %u to move to queue %u",
	     moved_qid, qid);

	/* Stop traffic to the old queue. */
	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
	__clear_bit(moved_qid, priv->htb.qos_used_qids);

	if (opened) {
		txq = netdev_get_tx_queue(priv->netdev,
					  mlx5e_qid_from_qos(&priv->channels, moved_qid));
		mlx5e_deactivate_qos_sq(priv, moved_qid);
		mlx5e_close_qos_sq(priv, moved_qid);
	}

	/* Prevent packets from the old class from getting into the new one. */
	mlx5e_reset_qdisc(priv->netdev, moved_qid);

	__set_bit(qid, priv->htb.qos_used_qids);
	WRITE_ONCE(node->qid, qid);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
				 node->classid, moved_qid, qid, err);
		} else {
			mlx5e_activate_qos_sq(priv, node);
		}
	}

	mlx5e_update_tx_netdev_queues(priv);
	if (opened)
		mlx5e_reactivate_qos_sq(priv, moved_qid, txq);

	*old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
	*new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
	return 0;
}
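
/* Deleting the last child of an inner node turns the parent back into a
 * leaf: a new firmware leaf is created under the grandparent with the
 * parent's parameters, the child's qid is handed over to the parent, and
 * the old firmware nodes are destroyed.
 */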
int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
			    struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *node, *parent;
	u32 old_hw_id, new_hw_id;
	int err, saved_err = 0;
	u16 qid;

	qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
		force ? "_FORCE" : "", classid);

	node = mlx5e_sw_node_find(priv, classid);
	if (!node)
		return -ENOENT;

	err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
					node->parent->bw_share,
					node->parent->max_average_bw,
					&new_hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
		qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
			classid, err);
		if (!force)
			return err;
		saved_err = err;
	}

	/* Store qid for reuse and prevent clearing the bit. */
	qid = node->qid;
	/* Pairs with mlx5e_get_txq_by_classid. */
	WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_deactivate_qos_sq(priv, qid);
		mlx5e_close_qos_sq(priv, qid);
	}

	/* Prevent packets from the old class from getting into the new one. */
	mlx5e_reset_qdisc(priv->netdev, qid);

	err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
	if (err) /* Not fatal. */
		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	parent = node->parent;
	mlx5e_sw_node_delete(priv, node);

	node = parent;
	WRITE_ONCE(node->qid, qid);

	/* Early return on error in force mode. Parent will still be an inner
	 * node to be deleted by a following delete operation.
	 */
	if (saved_err)
		return saved_err;

	old_hw_id = node->hw_id;
	node->hw_id = new_hw_id;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_open_qos_sq(priv, &priv->channels, node);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
			qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
				 classid, err);
		} else {
			mlx5e_activate_qos_sq(priv, node);
		}
	}

	err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
	if (err) /* Not fatal. */
		qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
			 node->hw_id, classid, err);

	return 0;
}
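
/* A child's bw_share is relative to its parent's ceil, so changing a node's
 * ceil invalidates the bw_share of all its children; recompute them and
 * push to firmware only the values that actually changed.
 */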
static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
				     struct netlink_ext_ack *extack)
{
	struct mlx5e_qos_node *child;
	int err = 0;
	int bkt;

	hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
		u32 old_bw_share = child->bw_share;
		int err_one;

		if (child->parent != node)
			continue;

		mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
		if (child->bw_share == old_bw_share)
			continue;

		err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
					       child->max_average_bw, child->hw_id);
		if (!err && err_one) {
			err = err_one;

			NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
			qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
				node->classid, err);
		}
	}

	return err;
}

int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
			  struct netlink_ext_ack *extack)
{
	u32 bw_share, max_average_bw;
	struct mlx5e_qos_node *node;
	bool ceil_changed = false;
	int err;

	qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
		classid, rate, ceil);

	node = mlx5e_sw_node_find(priv, classid);
	if (!node)
		return -ENOENT;

	node->rate = rate;
	mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
	mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);

	err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
				   max_average_bw, node->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
		qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
			classid, err);
		return err;
	}

	if (max_average_bw != node->max_average_bw)
		ceil_changed = true;

	node->bw_share = bw_share;
	node->max_average_bw = max_average_bw;

	if (ceil_changed)
		err = mlx5e_qos_update_children(priv, node, extack);

	return err;
}