1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
5 #include <linux/slab.h>
6 #include <linux/netdevice.h>
/* Snapshot of the parameters that drive TX queue selection.
 * Readers (mlx5e_select_queue) access the current snapshot under RCU via
 * selq->active; writers build the next snapshot in selq->standby under
 * selq->state_lock and publish it with mlx5e_selq_apply().
 * NOTE(review): additional fields (num_tcs, is_htb, is_ptp) are referenced
 * by the code below but their declarations are not visible in this excerpt.
 */
10 struct mlx5e_selq_params {
11 unsigned int num_regular_queues;
12 unsigned int num_channels;
/* mlx5e_selq_init - allocate the standby/active parameter pair.
 * @selq: selector state to initialize.
 * @state_lock: mutex that serializes writers of the standby set.
 *
 * Publishes an initial active set with dummy values via RCU so that
 * mlx5e_select_queue() can run safely before the real channel
 * configuration is applied.
 * NOTE(review): NULL checks and return statements are not visible in this
 * excerpt; the kvfree(selq->standby) below suggests -ENOMEM unwinding when
 * the second allocation fails — confirm against the full source.
 */
18 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
20 struct mlx5e_selq_params *init_params;
/* Remember the caller's lock; writers must hold it (see lockdep asserts). */
22 selq->state_lock = state_lock;
24 selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
/* sizeof(*selq->active) is the same struct type as *init_params. */
28 init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
/* Unwind the standby allocation on failure of the second one. */
30 kvfree(selq->standby);
34 /* Assign dummy values, so that mlx5e_select_queue won't crash. */
35 *init_params = (struct mlx5e_selq_params) {
36 .num_regular_queues = 1,
/* Make the dummy parameters visible to RCU readers. */
42 rcu_assign_pointer(selq->active, init_params);
/* mlx5e_selq_cleanup - free both parameter sets on teardown.
 *
 * The two kvfree() calls are NOT a double free: the first releases the
 * standby set, then mlx5e_selq_apply() swaps the active set into
 * ->standby (is_prepared is forced to true to satisfy apply's
 * WARN_ON_ONCE precondition), and the second kvfree() releases what was
 * previously active.
 * NOTE(review): lines setting ->standby = NULL and any state_lock
 * acquisition are not visible in this excerpt — confirm locking rules
 * against the full source.
 */
47 void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
49 WARN_ON_ONCE(selq->is_prepared);
51 kvfree(selq->standby);
/* Pretend a standby set was prepared so mlx5e_selq_apply() will swap. */
53 selq->is_prepared = true;
55 mlx5e_selq_apply(selq);
/* ->standby now holds the formerly active set; free it too. */
57 kvfree(selq->standby);
/* mlx5e_selq_prepare - stage new selection parameters in the standby set.
 * @selq: selector state; caller must hold selq->state_lock.
 * @params: source of channel/TC configuration.
 * @htb: whether HTB (rate-limited) queues are enabled.
 *
 * Only writes ->standby; nothing is visible to readers until
 * mlx5e_selq_apply() swaps it in. Must not be called twice without an
 * intervening apply/cancel (guarded by the is_prepared WARN).
 */
61 void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
63 lockdep_assert_held(selq->state_lock);
64 WARN_ON_ONCE(selq->is_prepared);
66 selq->is_prepared = true;
68 selq->standby->num_channels = params->num_channels;
69 selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
/* Regular (non-PTP, non-HTB) txqs are laid out as channels x TCs. */
70 selq->standby->num_regular_queues =
71 selq->standby->num_channels * selq->standby->num_tcs;
72 selq->standby->is_htb = htb;
73 selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
/* mlx5e_selq_apply - atomically publish the prepared standby set.
 *
 * Swaps ->active and ->standby under RCU, then waits for in-flight
 * readers (ndo_select_queue) to finish before recycling the old active
 * set as the new standby buffer. Caller must have called
 * mlx5e_selq_prepare() first and must hold selq->state_lock (checked via
 * lockdep_is_held in rcu_replace_pointer).
 */
76 void mlx5e_selq_apply(struct mlx5e_selq *selq)
78 struct mlx5e_selq_params *old_params;
80 WARN_ON_ONCE(!selq->is_prepared);
82 selq->is_prepared = false;
84 old_params = rcu_replace_pointer(selq->active, selq->standby,
85 lockdep_is_held(selq->state_lock));
86 synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
/* Old active set becomes the scratch buffer for the next prepare(). */
87 selq->standby = old_params;
/* mlx5e_selq_cancel - discard a prepared-but-unapplied standby set.
 *
 * Simply clears is_prepared; the staged values remain in ->standby and
 * will be overwritten by the next mlx5e_selq_prepare(). Caller must hold
 * selq->state_lock.
 */
90 void mlx5e_selq_cancel(struct mlx5e_selq *selq)
92 lockdep_assert_held(selq->state_lock);
93 WARN_ON_ONCE(!selq->is_prepared);
95 selq->is_prepared = false;
98 #ifdef CONFIG_MLX5_CORE_EN_DCB
/* Map the skb's DSCP codepoint to a user priority via the device's
 * dscp2prio table. DSCP is the upper 6 bits of the IP DS field, hence
 * the >> 2. NOTE(review): the initializer of dscp_cp for non-IP packets
 * is not visible in this excerpt — presumably 0; confirm.
 */
99 static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
103 if (skb->protocol == htons(ETH_P_IP))
104 dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
105 else if (skb->protocol == htons(ETH_P_IPV6))
106 dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
108 return priv->dcbx_dp.dscp2prio[dscp_cp];
/* Pick the PTP (port timestamping) txq for @skb.
 * Priority is derived, in precedence order: DSCP (when the device trusts
 * DSCP and DCB is compiled in), then VLAN PCP, otherwise a default
 * (NOTE(review): the initializer of `up` and the early return when the
 * device has no TCs are not visible in this excerpt).
 */
112 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
114 struct mlx5e_priv *priv = netdev_priv(dev);
117 if (!netdev_get_num_tc(dev))
120 #ifdef CONFIG_MLX5_CORE_EN_DCB
121 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
122 up = mlx5e_get_dscp_up(priv, skb);
125 if (skb_vlan_tag_present(skb))
126 up = skb_vlan_tag_get_prio(skb);
/* Translate the priority to the real txq index of the PTP channel. */
129 return priv->port_ptp_tc2realtxq[up];
/* Resolve an HTB (rate-limited) txq for @skb.
 * If skb->priority carries the HTB major handle, use its minor part as
 * the classid; otherwise fall back to the configured default class.
 * NOTE(review): the zero-classid early-exit path is not visible in this
 * excerpt — confirm against the full source.
 */
132 static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
137 if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
138 classid = TC_H_MIN(skb->priority);
140 classid = READ_ONCE(priv->htb.defcls);
145 return mlx5e_get_txq_by_classid(priv, classid);
/* mlx5e_select_queue - ndo_select_queue implementation.
 *
 * Fast path: when only regular queues exist (real_num_tx_queues ==
 * num_tc_x_num_ch), defer to netdev_pick_tx() and adjust for TCs.
 * Slow path: special queues (HTB and/or PTP) exist beyond the regular
 * range, so try HTB classification first, then the PTP SQ, and clamp any
 * netdev_pick_tx() result back into the regular-queue range.
 * NOTE(review): several lines (early returns, the `up` initializer, the
 * final arithmetic combining ch_ix and up) are not visible in this
 * excerpt.
 */
148 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
149 struct net_device *sb_dev)
151 struct mlx5e_priv *priv = netdev_priv(dev);
157 /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
158 num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
/* More txqs than channels*TCs means PTP and/or HTB queues exist. */
159 if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
160 struct mlx5e_ptp *ptp_channel;
162 /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
163 u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
165 if (unlikely(htb_maj_id)) {
166 txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
/* Use the PTP SQ only when it exists, TX timestamping is on, and
 * the skb actually requests a HW timestamp.
 */
171 ptp_channel = READ_ONCE(priv->channels.ptp);
172 if (unlikely(ptp_channel &&
173 test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
174 mlx5e_use_ptpsq(skb)))
175 return mlx5e_select_ptpsq(dev, skb);
177 txq_ix = netdev_pick_tx(dev, skb, NULL);
178 /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
179 * If they are selected, switch to regular queues.
180 * Driver to select these queues only at mlx5e_select_ptpsq()
181 * and mlx5e_select_htb_queue().
*/
183 if (unlikely(txq_ix >= num_tc_x_num_ch))
184 txq_ix %= num_tc_x_num_ch;
/* Fast path: no special queues, let the stack pick. */
186 txq_ix = netdev_pick_tx(dev, skb, NULL);
189 if (!netdev_get_num_tc(dev))
/* Priority resolution mirrors mlx5e_select_ptpsq(): DSCP first
 * (when trusted), then VLAN PCP.
 */
192 #ifdef CONFIG_MLX5_CORE_EN_DCB
193 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
194 up = mlx5e_get_dscp_up(priv, skb);
197 if (skb_vlan_tag_present(skb))
198 up = skb_vlan_tag_get_prio(skb);
200 /* Normalize any picked txq_ix to [0, num_channels),
201 * So we can return a txq_ix that matches the channel and
*/
204 ch_ix = priv->txq2sq[txq_ix]->ch_ix;
/* Map (channel, priority) to the real txq index. */
206 return priv->channel_tc2realtxq[ch_ix][up];