net/mlx5e: Move mlx5e_select_queue to en/selq.c
authorMaxim Mikityanskiy <maximmi@nvidia.com>
Tue, 25 Jan 2022 10:52:52 +0000 (12:52 +0200)
committerSaeed Mahameed <saeedm@nvidia.com>
Tue, 15 Feb 2022 06:30:50 +0000 (22:30 -0800)
This commit moves mlx5e_select_queue, together with all the code backing
ndo_select_queue, into en/selq.c, so that everything that operates on
selq lives in a single dedicated file.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
drivers/net/ethernet/mellanox/mlx5/core/en/selq.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

index 50ea58a..297ba79 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include "en.h"
+#include "en/ptp.h"
 
 struct mlx5e_selq_params {
        unsigned int num_regular_queues;
@@ -93,3 +94,114 @@ void mlx5e_selq_cancel(struct mlx5e_selq *selq)
 
        selq->is_prepared = false;
 }
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+       int dscp_cp = 0;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+       return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int up = 0;
+
+       if (!netdev_get_num_tc(dev))
+               goto return_txq;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+       if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+               up = mlx5e_get_dscp_up(priv, skb);
+       else
+#endif
+               if (skb_vlan_tag_present(skb))
+                       up = skb_vlan_tag_get_prio(skb);
+
+return_txq:
+       return priv->port_ptp_tc2realtxq[up];
+}
+
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+                                 u16 htb_maj_id)
+{
+       u16 classid;
+
+       if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
+               classid = TC_H_MIN(skb->priority);
+       else
+               classid = READ_ONCE(priv->htb.defcls);
+
+       if (!classid)
+               return 0;
+
+       return mlx5e_get_txq_by_classid(priv, classid);
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      struct net_device *sb_dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int num_tc_x_num_ch;
+       int txq_ix;
+       int up = 0;
+       int ch_ix;
+
+       /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+       num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+       if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+               struct mlx5e_ptp *ptp_channel;
+
+               /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+               u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
+
+               if (unlikely(htb_maj_id)) {
+                       txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
+                       if (txq_ix > 0)
+                               return txq_ix;
+               }
+
+               ptp_channel = READ_ONCE(priv->channels.ptp);
+               if (unlikely(ptp_channel &&
+                            test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
+                            mlx5e_use_ptpsq(skb)))
+                       return mlx5e_select_ptpsq(dev, skb);
+
+               txq_ix = netdev_pick_tx(dev, skb, NULL);
+               /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
+                * If they are selected, switch to regular queues.
+                * Driver to select these queues only at mlx5e_select_ptpsq()
+                * and mlx5e_select_htb_queue().
+                */
+               if (unlikely(txq_ix >= num_tc_x_num_ch))
+                       txq_ix %= num_tc_x_num_ch;
+       } else {
+               txq_ix = netdev_pick_tx(dev, skb, NULL);
+       }
+
+       if (!netdev_get_num_tc(dev))
+               return txq_ix;
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+       if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+               up = mlx5e_get_dscp_up(priv, skb);
+       else
+#endif
+               if (skb_vlan_tag_present(skb))
+                       up = skb_vlan_tag_get_prio(skb);
+
+       /* Normalize any picked txq_ix to [0, num_channels),
+        * So we can return a txq_ix that matches the channel and
+        * packet UP.
+        */
+       ch_ix = priv->txq2sq[txq_ix]->ch_ix;
+
+       return priv->channel_tc2realtxq[ch_ix][up];
+}
index 2648c23..b1c73b5 100644 (file)
@@ -16,6 +16,8 @@ struct mlx5e_selq {
 };
 
 struct mlx5e_params;
+struct net_device;
+struct sk_buff;
 
 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
 void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
@@ -23,4 +25,7 @@ void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bo
 void mlx5e_selq_apply(struct mlx5e_selq *selq);
 void mlx5e_selq_cancel(struct mlx5e_selq *selq);
 
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      struct net_device *sb_dev);
+
 #endif /* __MLX5_EN_SELQ_H__ */
index 1c48cfa..210d23b 100644 (file)
@@ -55,8 +55,6 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
 
 /* TX */
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      struct net_device *sb_dev);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
index 7266617..2dc4840 100644 (file)
@@ -53,117 +53,6 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
        }
 }
 
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
-{
-       int dscp_cp = 0;
-
-       if (skb->protocol == htons(ETH_P_IP))
-               dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
-       else if (skb->protocol == htons(ETH_P_IPV6))
-               dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-
-       return priv->dcbx_dp.dscp2prio[dscp_cp];
-}
-#endif
-
-static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-       int up = 0;
-
-       if (!netdev_get_num_tc(dev))
-               goto return_txq;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-       if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
-               up = mlx5e_get_dscp_up(priv, skb);
-       else
-#endif
-               if (skb_vlan_tag_present(skb))
-                       up = skb_vlan_tag_get_prio(skb);
-
-return_txq:
-       return priv->port_ptp_tc2realtxq[up];
-}
-
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
-                                 u16 htb_maj_id)
-{
-       u16 classid;
-
-       if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
-               classid = TC_H_MIN(skb->priority);
-       else
-               classid = READ_ONCE(priv->htb.defcls);
-
-       if (!classid)
-               return 0;
-
-       return mlx5e_get_txq_by_classid(priv, classid);
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-                      struct net_device *sb_dev)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-       int num_tc_x_num_ch;
-       int txq_ix;
-       int up = 0;
-       int ch_ix;
-
-       /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
-       num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
-       if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
-               struct mlx5e_ptp *ptp_channel;
-
-               /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
-               u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
-
-               if (unlikely(htb_maj_id)) {
-                       txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
-                       if (txq_ix > 0)
-                               return txq_ix;
-               }
-
-               ptp_channel = READ_ONCE(priv->channels.ptp);
-               if (unlikely(ptp_channel &&
-                            test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
-                            mlx5e_use_ptpsq(skb)))
-                       return mlx5e_select_ptpsq(dev, skb);
-
-               txq_ix = netdev_pick_tx(dev, skb, NULL);
-               /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
-                * If they are selected, switch to regular queues.
-                * Driver to select these queues only at mlx5e_select_ptpsq()
-                * and mlx5e_select_htb_queue().
-                */
-               if (unlikely(txq_ix >= num_tc_x_num_ch))
-                       txq_ix %= num_tc_x_num_ch;
-       } else {
-               txq_ix = netdev_pick_tx(dev, skb, NULL);
-       }
-
-       if (!netdev_get_num_tc(dev))
-               return txq_ix;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-       if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
-               up = mlx5e_get_dscp_up(priv, skb);
-       else
-#endif
-               if (skb_vlan_tag_present(skb))
-                       up = skb_vlan_tag_get_prio(skb);
-
-       /* Normalize any picked txq_ix to [0, num_channels),
-        * So we can return a txq_ix that matches the channel and
-        * packet UP.
-        */
-       ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-
-       return priv->channel_tc2realtxq[ch_ix][up];
-}
-
 static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
 {
 #define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)