net: netpoll: Individualize the skb pool
author	Breno Leitao <leitao@debian.org>
Thu, 14 Nov 2024 11:00:11 +0000 (03:00 -0800)
committer	Jakub Kicinski <kuba@kernel.org>
Sat, 16 Nov 2024 00:25:34 +0000 (16:25 -0800)
The current implementation of the netpoll system uses a global skb
pool, which can lead to inefficient memory usage and wasted memory
when targets are disabled or no longer in use.

This can result in a significant amount of memory being unnecessarily
allocated and retained, potentially causing performance issues and
limiting the availability of resources for other system components.

Modify the netpoll system to assign a skb pool to each target instead of
using a global one.

This approach allows for more fine-grained control over memory
allocation and deallocation, ensuring that resources are only allocated
and retained as needed.
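
To illustrate the resulting lifecycle, here is a minimal sketch (not
part of this patch) of how a per-target pool behaves.
skb_queue_head_init() and skb_queue_purge() are existing kernel
helpers; the teardown path shown is hypothetical, since this change
only adds the pool and its initialization:

	/* Each target now owns its own pool inside struct netpoll. */
	struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	skb_queue_head_init(&np->skb_pool);	/* empty queue, lock initialized */
	refill_skbs(np);			/* pre-allocate up to MAX_SKBS buffers */

	/*
	 * While the target is active, find_skb() falls back to
	 * skb_dequeue(&np->skb_pool) when alloc_skb() fails.
	 */

	/*
	 * When the target is disabled, its buffers can be released right
	 * away instead of lingering in a global pool (hypothetical
	 * teardown path):
	 */
	skb_queue_purge(&np->skb_pool);
	kfree(np);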

Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://patch.msgid.link/20241114-skb_buffers_v2-v3-1-9be9f52a8b69@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
include/linux/netpoll.h
net/core/netpoll.c

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index cd4e28d..77635b8 100644
@@ -32,6 +32,7 @@ struct netpoll {
        bool ipv6;
        u16 local_port, remote_port;
        u8 remote_mac[ETH_ALEN];
+       struct sk_buff_head skb_pool;
 };
 
 struct netpoll_info {
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94b7f07..719c9aa 100644
@@ -45,9 +45,6 @@
 
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
-
-static struct sk_buff_head skb_pool;
-
 #define USEC_PER_POLL  50
 
 #define MAX_SKB_SIZE                                                   \
@@ -234,20 +231,23 @@ void netpoll_poll_enable(struct net_device *dev)
                up(&ni->dev_lock);
 }
 
-static void refill_skbs(void)
+static void refill_skbs(struct netpoll *np)
 {
+       struct sk_buff_head *skb_pool;
        struct sk_buff *skb;
        unsigned long flags;
 
-       spin_lock_irqsave(&skb_pool.lock, flags);
-       while (skb_pool.qlen < MAX_SKBS) {
+       skb_pool = &np->skb_pool;
+
+       spin_lock_irqsave(&skb_pool->lock, flags);
+       while (skb_pool->qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;
 
-               __skb_queue_tail(&skb_pool, skb);
+               __skb_queue_tail(skb_pool, skb);
        }
-       spin_unlock_irqrestore(&skb_pool.lock, flags);
+       spin_unlock_irqrestore(&skb_pool->lock, flags);
 }
 
 static void zap_completion_queue(void)
@@ -284,12 +284,12 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
        struct sk_buff *skb;
 
        zap_completion_queue();
-       refill_skbs();
+       refill_skbs(np);
 repeat:
 
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
-               skb = skb_dequeue(&skb_pool);
+               skb = skb_dequeue(&np->skb_pool);
 
        if (!skb) {
                if (++count < 10) {
@@ -673,6 +673,8 @@ int netpoll_setup(struct netpoll *np)
        struct in_device *in_dev;
        int err;
 
+       skb_queue_head_init(&np->skb_pool);
+
        rtnl_lock();
        if (np->dev_name[0]) {
                struct net *net = current->nsproxy->net_ns;
@@ -773,7 +775,7 @@ put_noaddr:
        }
 
        /* fill up the skb queue */
-       refill_skbs();
+       refill_skbs(np);
 
        err = __netpoll_setup(np, ndev);
        if (err)
@@ -792,13 +794,6 @@ unlock:
 }
 EXPORT_SYMBOL(netpoll_setup);
 
-static int __init netpoll_init(void)
-{
-       skb_queue_head_init(&skb_pool);
-       return 0;
-}
-core_initcall(netpoll_init);
-
 static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 {
        struct netpoll_info *npinfo =