net: skbuff: sprinkle more __GFP_NOWARN on ingress allocs
Author: Jakub Kicinski <kuba@kernel.org>
Fri, 2 Aug 2024 00:19:56 +0000 (17:19 -0700)
Committer: David S. Miller <davem@davemloft.net>
Mon, 5 Aug 2024 11:28:11 +0000 (12:28 +0100)
build_skb() and frag allocations done with GFP_ATOMIC will
fail in real life, when system is under memory pressure,
and there's nothing we can do about that. So no point
printing warnings.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/skbuff.c

index 83f8cd8..de2a044 100644 (file)
@@ -314,8 +314,8 @@ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
        fragsz = SKB_DATA_ALIGN(fragsz);
 
        local_lock_nested_bh(&napi_alloc_cache.bh_lock);
-       data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
-                                      align_mask);
+       data = __page_frag_alloc_align(&nc->page, fragsz,
+                                      GFP_ATOMIC | __GFP_NOWARN, align_mask);
        local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
        return data;
 
@@ -330,7 +330,8 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
                struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
 
                fragsz = SKB_DATA_ALIGN(fragsz);
-               data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
+               data = __page_frag_alloc_align(nc, fragsz,
+                                              GFP_ATOMIC | __GFP_NOWARN,
                                               align_mask);
        } else {
                local_bh_disable();
@@ -349,7 +350,7 @@ static struct sk_buff *napi_skb_cache_get(void)
        local_lock_nested_bh(&napi_alloc_cache.bh_lock);
        if (unlikely(!nc->skb_count)) {
                nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
-                                                     GFP_ATOMIC,
+                                                     GFP_ATOMIC | __GFP_NOWARN,
                                                      NAPI_SKB_CACHE_BULK,
                                                      nc->skb_cache);
                if (unlikely(!nc->skb_count)) {
@@ -418,7 +419,8 @@ struct sk_buff *slab_build_skb(void *data)
        struct sk_buff *skb;
        unsigned int size;
 
-       skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+       skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+                              GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;
 
@@ -469,7 +471,8 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
        struct sk_buff *skb;
 
-       skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
+       skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
+                              GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;