#define BYTES_IN_MBIT 125000
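+/* All HTB offload state now lives behind priv->htb, allocated on
+ * TC_HTB_CREATE and freed on TC_HTB_DESTROY:
+ * - qos_tc2node: hash table mapping HTB classid -> struct mlx5e_qos_node
+ * - qos_used_qids: bitmap of txq ids currently owned by leaf nodes
+ */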
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+};
+
int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
	if (nbytes < BYTES_IN_MBIT) {
		qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
			 nbytes, BYTES_IN_MBIT);
		return -EINVAL;
	}
	return 0;
}
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
{
- int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
+ int last;
+ last = find_last_bit(priv->htb->qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
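+	/* find_last_bit() returns the bitmap size when no bit is set, so an
+	 * empty bitmap makes this function return 0; otherwise it returns the
+	 * highest used qid + 1.
+	 */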
return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
}
int res;
WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
- res = find_first_zero_bit(priv->htb.qos_used_qids, size);
+ res = find_first_zero_bit(priv->htb->qos_used_qids, size);
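+	/* find_first_zero_bit() returns `size` once every qid is taken, which
+	 * becomes -ENOSPC below; e.g. with qids {0, 1, 3} in use, qid 2 is
+	 * handed out.
+	 */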
return res == size ? -ENOSPC : res;
}
node->parent = parent;
node->qid = qid;
- __set_bit(qid, priv->htb.qos_used_qids);
+ __set_bit(qid, priv->htb->qos_used_qids);
node->classid = classid;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
+ hash_add_rcu(priv->htb->qos_tc2node, &node->hnode, classid);
mlx5e_update_tx_netdev_queues(priv);
node->qid = MLX5E_QOS_QID_INNER;
node->classid = MLX5E_HTB_CLASSID_ROOT;
- hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
+ hash_add_rcu(priv->htb->qos_tc2node, &node->hnode, node->classid);
return node;
}
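/* The root is an inner node: it carries MLX5E_QOS_QID_INNER instead of a real
 * qid, which is why the loops over qos_tc2node further down skip such nodes
 * when opening or activating SQs.
 */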
{
struct mlx5e_qos_node *node = NULL;
- hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
+ hash_for_each_possible(priv->htb->qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
{
struct mlx5e_qos_node *node = NULL;
- hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
+ hash_for_each_possible_rcu(priv->htb->qos_tc2node, node, hnode, classid) {
if (node->classid == classid)
break;
}
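For context, a minimal sketch of how the RCU lookup above would be consumed on
a path that holds no locks; the helper name mlx5e_sw_node_find_rcu() is
inferred from the hunk, and the wrapper itself is purely illustrative:

static u16 example_htb_classid_to_qid(struct mlx5e_priv *priv, u32 classid)
{
	struct mlx5e_qos_node *node;
	u16 qid = 0;

	rcu_read_lock();
	node = mlx5e_sw_node_find_rcu(priv, classid);
	if (node) {
		/* Pairs with the WRITE_ONCE() updates of node->qid below. */
		u16 cur = READ_ONCE(node->qid);

		if (cur != MLX5E_QOS_QID_INNER)
			qid = cur;
	}
	rcu_read_unlock();

	return qid;
}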
{
hash_del_rcu(&node->hnode);
if (node->qid != MLX5E_QOS_QID_INNER) {
- __clear_bit(node->qid, priv->htb.qos_used_qids);
+ __clear_bit(node->qid, priv->htb->qos_used_qids);
mlx5e_update_tx_netdev_queues(priv);
}
	/* Make sure this qid is no longer selected by mlx5e_select_queue, so
	 * that the SQ can be safely closed and the qid reused.
	 */
	synchronize_net();
if (err)
return err;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ hash_for_each(priv->htb->qos_tc2node, bkt, node, hnode) {
if (node->qid == MLX5E_QOS_QID_INNER)
continue;
err = mlx5e_open_qos_sq(priv, chs, node);
struct mlx5e_qos_node *node = NULL;
int bkt;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ hash_for_each(priv->htb->qos_tc2node, bkt, node, hnode) {
if (node->qid == MLX5E_QOS_QID_INNER)
continue;
mlx5e_activate_qos_sq(priv, node);
qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
- if (!mlx5_qos_is_supported(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
- return -EOPNOTSUPP;
- }
-
mlx5e_selq_prepare_htb(&priv->selq, htb_maj_id, htb_defcls);
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
struct mlx5e_qos_node *node = NULL;
int bkt;
- hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
+ hash_for_each(priv->htb->qos_tc2node, bkt, node, hnode)
if (node->qid == qid)
break;
/* Stop traffic to the old queue. */
WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
- __clear_bit(moved_qid, priv->htb.qos_used_qids);
+ __clear_bit(moved_qid, priv->htb->qos_used_qids);
if (opened) {
txq = netdev_get_tx_queue(priv->netdev,
/* Prevent packets from the old class from getting into the new one. */
mlx5e_reset_qdisc(priv->netdev, moved_qid);
- __set_bit(qid, priv->htb.qos_used_qids);
+ __set_bit(qid, priv->htb->qos_used_qids);
WRITE_ONCE(node->qid, qid);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
int err = 0;
int bkt;
- hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
+ hash_for_each(priv->htb->qos_tc2node, bkt, child, hnode) {
u32 old_bw_share = child->bw_share;
int err_one;
}
/* HTB API */
+
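+/* The whole object is kvzalloc()ed: the bucket heads and the qid bitmap
+ * start zeroed, and teardown is a single kvfree().
+ */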
+static struct mlx5e_htb *mlx5e_htb_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_htb), GFP_KERNEL);
+}
+
+static void mlx5e_htb_free(struct mlx5e_htb *htb)
+{
+ kvfree(htb);
+}
+
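+/* Called once per TC_HTB_CREATE: initialize the bucket heads before
+ * mlx5e_htb_root_add() hashes in the root node.
+ */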
+static int mlx5e_htb_init(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+ hash_init(priv->htb->qos_tc2node);
+
+ return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid, htb->extack);
+}
+
+static void mlx5e_htb_cleanup(struct mlx5e_priv *priv)
+{
+ mlx5e_htb_root_del(priv);
+}
+
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
{
int res;
+ if (!priv->htb && htb->command != TC_HTB_CREATE)
+ return -EINVAL;
+
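+	/* Everything below TC_HTB_CREATE operates on state created by it; the
+	 * guard above rejects those commands while priv->htb is NULL.
+	 */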
switch (htb->command) {
case TC_HTB_CREATE:
- return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
- htb->extack);
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(htb->extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+ priv->htb = mlx5e_htb_alloc();
+ if (!priv->htb)
+ return -ENOMEM;
+ res = mlx5e_htb_init(priv, htb);
+ if (res) {
+ mlx5e_htb_free(priv->htb);
+ priv->htb = NULL;
+ }
+ return res;
case TC_HTB_DESTROY:
- return mlx5e_htb_root_del(priv);
+ mlx5e_htb_cleanup(priv);
+ mlx5e_htb_free(priv->htb);
+ priv->htb = NULL;
+ return 0;
case TC_HTB_LEAF_ALLOC_QUEUE:
res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
htb->rate, htb->ceil, htb->extack);
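/* Userspace reaches these hooks through the htb qdisc's offload mode, e.g.
 * "tc qdisc replace dev eth0 root handle 1: htb offload" (TC_HTB_CREATE),
 * then "tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit"
 * (TC_HTB_LEAF_ALLOC_QUEUE); the device name is illustrative.
 */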