net: move netdev_max_backlog to net_hotdata
author Eric Dumazet <edumazet@google.com>
Wed, 6 Mar 2024 16:00:18 +0000 (16:00 +0000)
committer Jakub Kicinski <kuba@kernel.org>
Fri, 8 Mar 2024 05:12:42 +0000 (21:12 -0800)
netdev_max_backlog is used in the rx fast path.

Move it to net_hotdata for better cache locality.
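
For reference, every converted call site reduces to the pattern sketched
below. The helper name backlog_has_room() is illustrative only (it is not
added by this patch), and the snippet assumes <linux/skbuff.h> and
<net/hotdata.h> are included:

	/* Illustrative sketch, not part of the patch: the limit now
	 * lives in the cacheline-aligned net_hotdata struct instead of
	 * a standalone __read_mostly global, and is still read with
	 * READ_ONCE() to pair with the sysctl writer.
	 */
	static bool backlog_has_room(const struct sk_buff_head *q)
	{
		return skb_queue_len(q) <= READ_ONCE(net_hotdata.max_backlog);
	}

The sysctl name (net.core.netdev_max_backlog) and its semantics are
unchanged; only where the value is stored moves.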

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240306160031.874438-6-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/linux/netdevice.h
include/net/hotdata.h
net/core/dev.c
net/core/gro_cells.c
net/core/hotdata.c
net/core/sysctl_net_core.c
net/xfrm/espintcp.c
net/xfrm/xfrm_input.c

include/linux/netdevice.h
index b18ac80..c9a671b 100644
@@ -4793,7 +4793,6 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
                           const struct pcpu_sw_netstats __percpu *netstats);
 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
 
-extern int             netdev_max_backlog;
 extern int             dev_rx_weight;
 extern int             dev_tx_weight;
 
include/net/hotdata.h
index d462cb8..dc50b20 100644
@@ -12,6 +12,7 @@ struct net_hotdata {
        int                     netdev_budget;
        int                     netdev_budget_usecs;
        int                     tstamp_prequeue;
+       int                     max_backlog;
 };
 
 extern struct net_hotdata net_hotdata;
net/core/dev.c
index e1493e5..e23edba 100644
@@ -4404,9 +4404,6 @@ EXPORT_SYMBOL(__dev_direct_xmit);
  *                     Receiver routines
  *************************************************************************/
 
-int netdev_max_backlog __read_mostly = 1000;
-EXPORT_SYMBOL(netdev_max_backlog);
-
 unsigned int sysctl_skb_defer_max __read_mostly = 64;
 int weight_p __read_mostly = 64;           /* old backlog weight */
 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
@@ -4713,7 +4710,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
        struct softnet_data *sd;
        unsigned int old_flow, new_flow;
 
-       if (qlen < (READ_ONCE(netdev_max_backlog) >> 1))
+       if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
                return false;
 
        sd = this_cpu_ptr(&softnet_data);
@@ -4761,7 +4758,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
        if (!netif_running(skb->dev))
                goto drop;
        qlen = skb_queue_len(&sd->input_pkt_queue);
-       if (qlen <= READ_ONCE(netdev_max_backlog) && !skb_flow_limit(skb, qlen)) {
+       if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
+           !skb_flow_limit(skb, qlen)) {
                if (qlen) {
 enqueue:
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
net/core/gro_cells.c
index ed5ec5d..ff8e5b6 100644
@@ -3,6 +3,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <net/gro_cells.h>
+#include <net/hotdata.h>
 
 struct gro_cell {
        struct sk_buff_head     napi_skbs;
@@ -26,7 +27,7 @@ int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
 
        cell = this_cpu_ptr(gcells->cells);
 
-       if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
+       if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(net_hotdata.max_backlog)) {
 drop:
                dev_core_stats_rx_dropped_inc(dev);
                kfree_skb(skb);
net/core/hotdata.c
index 29fcfe8..35ed5a8 100644
@@ -15,4 +15,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = {
        .netdev_budget_usecs = 2 * USEC_PER_SEC / HZ,
 
        .tstamp_prequeue = 1,
+       .max_backlog = 1000,
 };
+EXPORT_SYMBOL(net_hotdata);
net/core/sysctl_net_core.c
index bddd07d..8eaeeb2 100644
@@ -440,7 +440,7 @@ static struct ctl_table net_core_table[] = {
        },
        {
                .procname       = "netdev_max_backlog",
-               .data           = &netdev_max_backlog,
+               .data           = &net_hotdata.max_backlog,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
net/xfrm/espintcp.c
index d3b3f9e..fe82e2d 100644
@@ -10,6 +10,7 @@
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6_stubs.h>
 #endif
+#include <net/hotdata.h>
 
 static void handle_nonesp(struct espintcp_ctx *ctx, struct sk_buff *skb,
                          struct sock *sk)
@@ -169,7 +170,8 @@ int espintcp_queue_out(struct sock *sk, struct sk_buff *skb)
 {
        struct espintcp_ctx *ctx = espintcp_getctx(sk);
 
-       if (skb_queue_len(&ctx->out_queue) >= READ_ONCE(netdev_max_backlog))
+       if (skb_queue_len(&ctx->out_queue) >=
+           READ_ONCE(net_hotdata.max_backlog))
                return -ENOBUFS;
 
        __skb_queue_tail(&ctx->out_queue, skb);
net/xfrm/xfrm_input.c
index bd4ce21..161f535 100644
@@ -21,6 +21,7 @@
 #include <net/ip_tunnels.h>
 #include <net/ip6_tunnel.h>
 #include <net/dst_metadata.h>
+#include <net/hotdata.h>
 
 #include "xfrm_inout.h"
 
@@ -764,7 +765,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 
        trans = this_cpu_ptr(&xfrm_trans_tasklet);
 
-       if (skb_queue_len(&trans->queue) >= READ_ONCE(netdev_max_backlog))
+       if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
                return -ENOBUFS;
 
        BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));