net/core/gro_cells.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>

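/* gro_cells lets stacked devices such as tunnels, which have no
 * hardware receive queues of their own, still benefit from GRO: each
 * received skb is queued on a per-CPU list that is drained by a
 * dedicated per-CPU NAPI instance feeding napi_gro_receive().
 */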
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

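/* Queue @skb for GRO processing on the current CPU's cell.
 *
 * Callers run in the receive path with BH disabled (softirq), so the
 * lockless __skb_queue_tail() below cannot race with gro_cell_poll(),
 * which also runs in BH context on the same CPU.
 */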
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	/* Fall back to netif_rx() when the per-CPU cells are not set up,
	 * when the skb is cloned (GRO may modify shared data), or when
	 * the device elides GRO (GRO disabled or an XDP program attached).
	 */
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	/* Schedule NAPI only on the empty->nonempty transition; it stays
	 * scheduled until gro_cell_poll() has drained the queue.
	 */
	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

/* Called in BH context by the NAPI core: drains up to @budget skbs
 * from the per-CPU queue into the GRO engine.
 */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Only complete when the queue is drained; otherwise the NAPI
	 * core will poll us again.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

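/* Allocate and enable one gro_cell per possible CPU for @dev.
 * Returns 0, or -ENOMEM if the per-CPU allocation fails. The NAPI
 * instances are excluded from busy polling (NAPI_STATE_NO_BUSY_POLL),
 * as they are not backed by a real device queue.
 */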
int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

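/* Tear down all cells and free the per-CPU storage. The caller must
 * guarantee no concurrent gro_cells_receive() (e.g. the device is
 * already down), since the skb queues are purged without locking.
 */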
void gro_cells_destroy(struct gro_cells *gcells)
{
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* This barrier is needed because netpoll could access dev->napi_list
	 * under rcu protection.
	 */
	synchronize_net();

	free_percpu(gcells->cells);
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
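
/* Hedged usage sketch (not part of the upstream file): roughly how a
 * tunnel driver would wire up gro_cells, following the pattern used by
 * drivers such as ip_tunnel. The my_tunnel struct and all my_tunnel_*
 * names are hypothetical.
 */
#if 0
struct my_tunnel {
	struct gro_cells	gro_cells;	/* one set of cells per device */
};

static int my_tunnel_dev_init(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Allocate the per-CPU cells before the device can receive. */
	return gro_cells_init(&t->gro_cells, dev);
}

static int my_tunnel_rx(struct my_tunnel *t, struct sk_buff *skb)
{
	/* Called from the decap path (BH context): hand the inner
	 * packet to the per-CPU NAPI so it can be GRO-aggregated.
	 */
	return gro_cells_receive(&t->gro_cells, skb);
}

static void my_tunnel_dev_uninit(struct net_device *dev)
{
	struct my_tunnel *t = netdev_priv(dev);

	/* Device is going away; no more packets can arrive. */
	gro_cells_destroy(&t->gro_cells);
}
#endif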