1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <linux/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/sched/mm.h>
85 #include <linux/mutex.h>
86 #include <linux/string.h>
87 #include <linux/mm.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/errno.h>
91 #include <linux/interrupt.h>
92 #include <linux/if_ether.h>
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/ethtool.h>
96 #include <linux/notifier.h>
97 #include <linux/skbuff.h>
98 #include <linux/bpf.h>
99 #include <linux/bpf_trace.h>
100 #include <net/net_namespace.h>
101 #include <net/sock.h>
102 #include <net/busy_poll.h>
103 #include <linux/rtnetlink.h>
104 #include <linux/stat.h>
105 #include <net/dst.h>
106 #include <net/dst_metadata.h>
107 #include <net/pkt_sched.h>
108 #include <net/pkt_cls.h>
109 #include <net/checksum.h>
110 #include <net/xfrm.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
126 #include <net/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <linux/pci.h>
136 #include <linux/inetdevice.h>
137 #include <linux/cpu_rmap.h>
138 #include <linux/static_key.h>
139 #include <linux/hashtable.h>
140 #include <linux/vmalloc.h>
141 #include <linux/if_macvlan.h>
142 #include <linux/errqueue.h>
143 #include <linux/hrtimer.h>
144 #include <linux/netfilter_ingress.h>
145 #include <linux/crash_dump.h>
146 #include <linux/sctp.h>
147 #include <net/udp_tunnel.h>
148 #include <linux/net_namespace.h>
149
150 #include "net-sysfs.h"
151
152 #define MAX_GRO_SKBS 8
153
154 /* This should be increased if a protocol with a bigger head is added. */
155 #define GRO_MAX_HEAD (MAX_HEADER + 128)
156
157 static DEFINE_SPINLOCK(ptype_lock);
158 static DEFINE_SPINLOCK(offload_lock);
159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
160 struct list_head ptype_all __read_mostly;       /* Taps */
161 static struct list_head offload_base __read_mostly;
162
163 static int netif_rx_internal(struct sk_buff *skb);
164 static int call_netdevice_notifiers_info(unsigned long val,
165                                          struct netdev_notifier_info *info);
166 static struct napi_struct *napi_by_id(unsigned int napi_id);
167
168 /*
169  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
170  * semaphore.
171  *
172  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
173  *
174  * Writers must hold the rtnl semaphore while they loop through the
175  * dev_base_head list, and hold dev_base_lock for writing when they do the
176  * actual updates.  This allows pure readers to access the list even
177  * while a writer is preparing to update it.
178  *
179  * To put it another way, dev_base_lock is held for writing only to
180  * protect against pure readers; the rtnl semaphore provides the
181  * protection against other writers.
182  *
 183  * For example usages, see register_netdevice() and
 184  * unregister_netdevice(), which must be called with the rtnl
185  * semaphore held.
186  */
187 DEFINE_RWLOCK(dev_base_lock);
188 EXPORT_SYMBOL(dev_base_lock);
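
/*
 * A minimal sketch of the pure-reader pattern described above; it is
 * illustrative only and not part of this file. Under rcu_read_lock()
 * the base list can be walked without taking dev_base_lock at all;
 * walk_devices() is a hypothetical helper name.
 *
 *	static void walk_devices(struct net *net)
 *	{
 *		struct net_device *dev;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			pr_debug("saw device %s\n", dev->name);
 *		rcu_read_unlock();
 *	}
 */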
189
190 static DEFINE_MUTEX(ifalias_mutex);
191
192 /* protects napi_hash addition/deletion and napi_gen_id */
193 static DEFINE_SPINLOCK(napi_hash_lock);
194
195 static unsigned int napi_gen_id = NR_CPUS;
196 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
197
198 static seqcount_t devnet_rename_seq;
199
200 static inline void dev_base_seq_inc(struct net *net)
201 {
202         while (++net->dev_base_seq == 0)
203                 ;
204 }
205
206 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
207 {
208         unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
209
210         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
211 }
212
213 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
214 {
215         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
216 }
217
218 static inline void rps_lock(struct softnet_data *sd)
219 {
220 #ifdef CONFIG_RPS
221         spin_lock(&sd->input_pkt_queue.lock);
222 #endif
223 }
224
225 static inline void rps_unlock(struct softnet_data *sd)
226 {
227 #ifdef CONFIG_RPS
228         spin_unlock(&sd->input_pkt_queue.lock);
229 #endif
230 }
231
232 /* Device list insertion */
233 static void list_netdevice(struct net_device *dev)
234 {
235         struct net *net = dev_net(dev);
236
237         ASSERT_RTNL();
238
239         write_lock_bh(&dev_base_lock);
240         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
241         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
242         hlist_add_head_rcu(&dev->index_hlist,
243                            dev_index_hash(net, dev->ifindex));
244         write_unlock_bh(&dev_base_lock);
245
246         dev_base_seq_inc(net);
247 }
248
249 /* Device list removal
 250  * the caller must respect an RCU grace period before freeing/reusing dev
251  */
252 static void unlist_netdevice(struct net_device *dev)
253 {
254         ASSERT_RTNL();
255
256         /* Unlink dev from the device chain */
257         write_lock_bh(&dev_base_lock);
258         list_del_rcu(&dev->dev_list);
259         hlist_del_rcu(&dev->name_hlist);
260         hlist_del_rcu(&dev->index_hlist);
261         write_unlock_bh(&dev_base_lock);
262
263         dev_base_seq_inc(dev_net(dev));
264 }
265
266 /*
267  *      Our notifier list
268  */
269
270 static RAW_NOTIFIER_HEAD(netdev_chain);
271
272 /*
273  *      Device drivers call our routines to queue packets here. We empty the
274  *      queue in the local softnet handler.
275  */
276
277 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
278 EXPORT_PER_CPU_SYMBOL(softnet_data);
279
280 #ifdef CONFIG_LOCKDEP
281 /*
282  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
283  * according to dev->type
284  */
285 static const unsigned short netdev_lock_type[] = {
286          ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
287          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
288          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
289          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
290          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
291          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
292          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
293          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
294          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
295          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
296          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
297          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
298          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
299          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
300          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
301
302 static const char *const netdev_lock_name[] = {
303         "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
304         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
305         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
306         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
307         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
308         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
309         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
310         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
311         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
312         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
313         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
314         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
315         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
316         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
317         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
318
319 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
320 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
321
322 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
323 {
324         int i;
325
326         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
327                 if (netdev_lock_type[i] == dev_type)
328                         return i;
329         /* the last key is used by default */
330         return ARRAY_SIZE(netdev_lock_type) - 1;
331 }
332
333 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
334                                                  unsigned short dev_type)
335 {
336         int i;
337
338         i = netdev_lock_pos(dev_type);
339         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
340                                    netdev_lock_name[i]);
341 }
342
343 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
344 {
345         int i;
346
347         i = netdev_lock_pos(dev->type);
348         lockdep_set_class_and_name(&dev->addr_list_lock,
349                                    &netdev_addr_lock_key[i],
350                                    netdev_lock_name[i]);
351 }
352 #else
353 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
354                                                  unsigned short dev_type)
355 {
356 }
357 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
358 {
359 }
360 #endif
361
362 /*******************************************************************************
363  *
364  *              Protocol management and registration routines
365  *
366  *******************************************************************************/
367
368
369 /*
370  *      Add a protocol ID to the list. Now that the input handler is
371  *      smarter we can dispense with all the messy stuff that used to be
372  *      here.
373  *
 374  *      BEWARE!!! Protocol handlers that mangle input packets
 375  *      MUST BE last in the hash buckets, and checking protocol handlers
 376  *      MUST start from the promiscuous ptype_all chain in net_bh.
 377  *      It is true now, do not change it.
 378  *      Explanation follows: if a protocol handler that mangles packets
 379  *      were first on the list, it could not sense that the packet is
 380  *      cloned and should be copied-on-write, so it would modify the
 381  *      packet in place and subsequent readers would get a broken packet.
 382  *                                                      --ANK (980803)
383  */
384
385 static inline struct list_head *ptype_head(const struct packet_type *pt)
386 {
387         if (pt->type == htons(ETH_P_ALL))
388                 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
389         else
390                 return pt->dev ? &pt->dev->ptype_specific :
391                                  &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
392 }
393
394 /**
395  *      dev_add_pack - add packet handler
396  *      @pt: packet type declaration
397  *
398  *      Add a protocol handler to the networking stack. The passed &packet_type
399  *      is linked into kernel lists and may not be freed until it has been
400  *      removed from the kernel lists.
401  *
 402  *      This call does not sleep, therefore it cannot
 403  *      guarantee that all CPUs that are in the middle of receiving packets
 404  *      will see the new packet type (until the next received packet).
405  */
406
407 void dev_add_pack(struct packet_type *pt)
408 {
409         struct list_head *head = ptype_head(pt);
410
411         spin_lock(&ptype_lock);
412         list_add_rcu(&pt->list, head);
413         spin_unlock(&ptype_lock);
414 }
415 EXPORT_SYMBOL(dev_add_pack);
416
417 /**
418  *      __dev_remove_pack        - remove packet handler
419  *      @pt: packet type declaration
420  *
421  *      Remove a protocol handler that was previously added to the kernel
422  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
423  *      from the kernel lists and can be freed or reused once this function
424  *      returns.
425  *
426  *      The packet type might still be in use by receivers
 427  *      and must not be freed until after all the CPUs have gone
428  *      through a quiescent state.
429  */
430 void __dev_remove_pack(struct packet_type *pt)
431 {
432         struct list_head *head = ptype_head(pt);
433         struct packet_type *pt1;
434
435         spin_lock(&ptype_lock);
436
437         list_for_each_entry(pt1, head, list) {
438                 if (pt == pt1) {
439                         list_del_rcu(&pt->list);
440                         goto out;
441                 }
442         }
443
444         pr_warn("dev_remove_pack: %p not found\n", pt);
445 out:
446         spin_unlock(&ptype_lock);
447 }
448 EXPORT_SYMBOL(__dev_remove_pack);
449
450 /**
451  *      dev_remove_pack  - remove packet handler
452  *      @pt: packet type declaration
453  *
454  *      Remove a protocol handler that was previously added to the kernel
455  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
456  *      from the kernel lists and can be freed or reused once this function
457  *      returns.
458  *
459  *      This call sleeps to guarantee that no CPU is looking at the packet
460  *      type after return.
461  */
462 void dev_remove_pack(struct packet_type *pt)
463 {
464         __dev_remove_pack(pt);
465
466         synchronize_net();
467 }
468 EXPORT_SYMBOL(dev_remove_pack);
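
/*
 * A hedged example of how a hypothetical out-of-tree tap might use the
 * handlers above; my_tap_rcv and my_tap are illustrative names only and
 * not part of this file. ETH_P_ALL places the handler on the tap chain,
 * and dev_remove_pack() sleeps, so it must run in process context.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);
 *	...
 *	dev_remove_pack(&my_tap);
 */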
469
470
471 /**
472  *      dev_add_offload - register offload handlers
473  *      @po: protocol offload declaration
474  *
475  *      Add protocol offload handlers to the networking stack. The passed
476  *      &proto_offload is linked into kernel lists and may not be freed until
477  *      it has been removed from the kernel lists.
478  *
 479  *      This call does not sleep, therefore it cannot
 480  *      guarantee that all CPUs that are in the middle of receiving packets
481  *      will see the new offload handlers (until the next received packet).
482  */
483 void dev_add_offload(struct packet_offload *po)
484 {
485         struct packet_offload *elem;
486
487         spin_lock(&offload_lock);
488         list_for_each_entry(elem, &offload_base, list) {
489                 if (po->priority < elem->priority)
490                         break;
491         }
492         list_add_rcu(&po->list, elem->list.prev);
493         spin_unlock(&offload_lock);
494 }
495 EXPORT_SYMBOL(dev_add_offload);
496
497 /**
498  *      __dev_remove_offload     - remove offload handler
499  *      @po: packet offload declaration
500  *
501  *      Remove a protocol offload handler that was previously added to the
502  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
503  *      is removed from the kernel lists and can be freed or reused once this
504  *      function returns.
505  *
506  *      The packet type might still be in use by receivers
 507  *      and must not be freed until after all the CPUs have gone
508  *      through a quiescent state.
509  */
510 static void __dev_remove_offload(struct packet_offload *po)
511 {
512         struct list_head *head = &offload_base;
513         struct packet_offload *po1;
514
515         spin_lock(&offload_lock);
516
517         list_for_each_entry(po1, head, list) {
518                 if (po == po1) {
519                         list_del_rcu(&po->list);
520                         goto out;
521                 }
522         }
523
524         pr_warn("dev_remove_offload: %p not found\n", po);
525 out:
526         spin_unlock(&offload_lock);
527 }
528
529 /**
530  *      dev_remove_offload       - remove packet offload handler
531  *      @po: packet offload declaration
532  *
533  *      Remove a packet offload handler that was previously added to the kernel
534  *      offload handlers by dev_add_offload(). The passed &offload_type is
535  *      removed from the kernel lists and can be freed or reused once this
536  *      function returns.
537  *
538  *      This call sleeps to guarantee that no CPU is looking at the packet
539  *      type after return.
540  */
541 void dev_remove_offload(struct packet_offload *po)
542 {
543         __dev_remove_offload(po);
544
545         synchronize_net();
546 }
547 EXPORT_SYMBOL(dev_remove_offload);
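
/*
 * A hedged sketch of an offload registration, loosely modelled on how
 * in-tree protocols use this API; my_offload and the callback names are
 * illustrative assumptions only.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = htons(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = my_gso_segment,
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *	...
 *	dev_remove_offload(&my_offload);
 */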
548
549 /******************************************************************************
550  *
551  *                    Device Boot-time Settings Routines
552  *
553  ******************************************************************************/
554
555 /* Boot time configuration table */
556 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
557
558 /**
559  *      netdev_boot_setup_add   - add new setup entry
560  *      @name: name of the device
561  *      @map: configured settings for the device
562  *
 563  *      Adds a new setup entry to the dev_boot_setup list.  The function
 564  *      returns 0 on error and 1 on success.  This is a generic routine for
 565  *      all netdevices.
566  */
567 static int netdev_boot_setup_add(char *name, struct ifmap *map)
568 {
569         struct netdev_boot_setup *s;
570         int i;
571
572         s = dev_boot_setup;
573         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
574                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
575                         memset(s[i].name, 0, sizeof(s[i].name));
576                         strlcpy(s[i].name, name, IFNAMSIZ);
577                         memcpy(&s[i].map, map, sizeof(s[i].map));
578                         break;
579                 }
580         }
581
582         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
583 }
584
585 /**
586  * netdev_boot_setup_check      - check boot time settings
587  * @dev: the netdevice
588  *
589  * Check boot time settings for the device.
 590  * Any settings found are applied to the device for use
 591  * later during device probing.
 592  * Returns 0 if no settings are found, 1 if they are.
593  */
594 int netdev_boot_setup_check(struct net_device *dev)
595 {
596         struct netdev_boot_setup *s = dev_boot_setup;
597         int i;
598
599         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
600                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
601                     !strcmp(dev->name, s[i].name)) {
602                         dev->irq = s[i].map.irq;
603                         dev->base_addr = s[i].map.base_addr;
604                         dev->mem_start = s[i].map.mem_start;
605                         dev->mem_end = s[i].map.mem_end;
606                         return 1;
607                 }
608         }
609         return 0;
610 }
611 EXPORT_SYMBOL(netdev_boot_setup_check);
612
613
614 /**
615  * netdev_boot_base     - get address from boot time settings
616  * @prefix: prefix for network device
617  * @unit: id for network device
618  *
 619  * Check boot time settings for the base address of the device.
 620  * Any settings found are applied to the device for use
 621  * later during device probing.
 622  * Returns 0 if no settings are found.
623  */
624 unsigned long netdev_boot_base(const char *prefix, int unit)
625 {
626         const struct netdev_boot_setup *s = dev_boot_setup;
627         char name[IFNAMSIZ];
628         int i;
629
630         sprintf(name, "%s%d", prefix, unit);
631
632         /*
 633          * If the device is already registered then return a base of 1
 634          * to indicate not to probe for this interface
635          */
636         if (__dev_get_by_name(&init_net, name))
637                 return 1;
638
639         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
640                 if (!strcmp(name, s[i].name))
641                         return s[i].map.base_addr;
642         return 0;
643 }
644
645 /*
 646  * Saves the settings configured at boot time for any netdevice.
647  */
648 int __init netdev_boot_setup(char *str)
649 {
650         int ints[5];
651         struct ifmap map;
652
653         str = get_options(str, ARRAY_SIZE(ints), ints);
654         if (!str || !*str)
655                 return 0;
656
657         /* Save settings */
658         memset(&map, 0, sizeof(map));
659         if (ints[0] > 0)
660                 map.irq = ints[1];
661         if (ints[0] > 1)
662                 map.base_addr = ints[2];
663         if (ints[0] > 2)
664                 map.mem_start = ints[3];
665         if (ints[0] > 3)
666                 map.mem_end = ints[4];
667
668         /* Add new entry to the list */
669         return netdev_boot_setup_add(str, &map);
670 }
671
672 __setup("netdev=", netdev_boot_setup);
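
/*
 * Boot-time usage sketch: the parser above reads up to four integers
 * (irq, base_addr, mem_start, mem_end) followed by the device name, so
 * a kernel command line entry would look roughly like the following
 * (the values are illustrative only):
 *
 *	netdev=9,0x300,0,0,eth0
 */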
673
674 /*******************************************************************************
675  *
676  *                          Device Interface Subroutines
677  *
678  *******************************************************************************/
679
680 /**
681  *      dev_get_iflink  - get 'iflink' value of a interface
682  *      @dev: targeted interface
683  *
684  *      Indicates the ifindex the interface is linked to.
685  *      Physical interfaces have the same 'ifindex' and 'iflink' values.
686  */
687
688 int dev_get_iflink(const struct net_device *dev)
689 {
690         if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
691                 return dev->netdev_ops->ndo_get_iflink(dev);
692
693         return dev->ifindex;
694 }
695 EXPORT_SYMBOL(dev_get_iflink);
696
697 /**
698  *      dev_fill_metadata_dst - Retrieve tunnel egress information.
699  *      @dev: targeted interface
700  *      @skb: The packet.
701  *
 702  *      For better visibility of tunnel traffic, OVS needs to retrieve
 703  *      egress tunnel information for a packet. The following API allows
 704  *      the user to get this info.
705  */
706 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
707 {
708         struct ip_tunnel_info *info;
709
710         if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
711                 return -EINVAL;
712
713         info = skb_tunnel_info_unclone(skb);
714         if (!info)
715                 return -ENOMEM;
716         if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
717                 return -EINVAL;
718
719         return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
720 }
721 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
722
723 /**
724  *      __dev_get_by_name       - find a device by its name
725  *      @net: the applicable net namespace
726  *      @name: name to find
727  *
728  *      Find an interface by name. Must be called under RTNL semaphore
729  *      or @dev_base_lock. If the name is found a pointer to the device
730  *      is returned. If the name is not found then %NULL is returned. The
731  *      reference counters are not incremented so the caller must be
732  *      careful with locks.
733  */
734
735 struct net_device *__dev_get_by_name(struct net *net, const char *name)
736 {
737         struct net_device *dev;
738         struct hlist_head *head = dev_name_hash(net, name);
739
740         hlist_for_each_entry(dev, head, name_hlist)
741                 if (!strncmp(dev->name, name, IFNAMSIZ))
742                         return dev;
743
744         return NULL;
745 }
746 EXPORT_SYMBOL(__dev_get_by_name);
747
748 /**
749  * dev_get_by_name_rcu  - find a device by its name
750  * @net: the applicable net namespace
751  * @name: name to find
752  *
753  * Find an interface by name.
754  * If the name is found a pointer to the device is returned.
755  * If the name is not found then %NULL is returned.
756  * The reference counters are not incremented so the caller must be
757  * careful with locks. The caller must hold RCU lock.
758  */
759
760 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
761 {
762         struct net_device *dev;
763         struct hlist_head *head = dev_name_hash(net, name);
764
765         hlist_for_each_entry_rcu(dev, head, name_hlist)
766                 if (!strncmp(dev->name, name, IFNAMSIZ))
767                         return dev;
768
769         return NULL;
770 }
771 EXPORT_SYMBOL(dev_get_by_name_rcu);
772
773 /**
774  *      dev_get_by_name         - find a device by its name
775  *      @net: the applicable net namespace
776  *      @name: name to find
777  *
778  *      Find an interface by name. This can be called from any
779  *      context and does its own locking. The returned handle has
780  *      the usage count incremented and the caller must use dev_put() to
781  *      release it when it is no longer needed. %NULL is returned if no
782  *      matching device is found.
783  */
784
785 struct net_device *dev_get_by_name(struct net *net, const char *name)
786 {
787         struct net_device *dev;
788
789         rcu_read_lock();
790         dev = dev_get_by_name_rcu(net, name);
791         if (dev)
792                 dev_hold(dev);
793         rcu_read_unlock();
794         return dev;
795 }
796 EXPORT_SYMBOL(dev_get_by_name);
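
/*
 * Minimal usage sketch (illustrative only): the reference obtained here
 * must be dropped with dev_put() once the caller is done with it.
 *
 *	struct net_device *dev;
 *
 *	dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		pr_debug("%s has ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */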
797
798 /**
799  *      __dev_get_by_index - find a device by its ifindex
800  *      @net: the applicable net namespace
801  *      @ifindex: index of device
802  *
 803  *      Search for an interface by index. Returns a pointer to the device,
 804  *      or %NULL if the device is not found. The device has not
805  *      had its reference counter increased so the caller must be careful
806  *      about locking. The caller must hold either the RTNL semaphore
807  *      or @dev_base_lock.
808  */
809
810 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
811 {
812         struct net_device *dev;
813         struct hlist_head *head = dev_index_hash(net, ifindex);
814
815         hlist_for_each_entry(dev, head, index_hlist)
816                 if (dev->ifindex == ifindex)
817                         return dev;
818
819         return NULL;
820 }
821 EXPORT_SYMBOL(__dev_get_by_index);
822
823 /**
824  *      dev_get_by_index_rcu - find a device by its ifindex
825  *      @net: the applicable net namespace
826  *      @ifindex: index of device
827  *
 828  *      Search for an interface by index. Returns a pointer to the device,
 829  *      or %NULL if the device is not found. The device has not
830  *      had its reference counter increased so the caller must be careful
831  *      about locking. The caller must hold RCU lock.
832  */
833
834 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
835 {
836         struct net_device *dev;
837         struct hlist_head *head = dev_index_hash(net, ifindex);
838
839         hlist_for_each_entry_rcu(dev, head, index_hlist)
840                 if (dev->ifindex == ifindex)
841                         return dev;
842
843         return NULL;
844 }
845 EXPORT_SYMBOL(dev_get_by_index_rcu);
846
847
848 /**
849  *      dev_get_by_index - find a device by its ifindex
850  *      @net: the applicable net namespace
851  *      @ifindex: index of device
852  *
 853  *      Search for an interface by index. Returns a pointer to the device,
 854  *      or NULL if the device is not found. The device returned has
855  *      had a reference added and the pointer is safe until the user calls
856  *      dev_put to indicate they have finished with it.
857  */
858
859 struct net_device *dev_get_by_index(struct net *net, int ifindex)
860 {
861         struct net_device *dev;
862
863         rcu_read_lock();
864         dev = dev_get_by_index_rcu(net, ifindex);
865         if (dev)
866                 dev_hold(dev);
867         rcu_read_unlock();
868         return dev;
869 }
870 EXPORT_SYMBOL(dev_get_by_index);
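
/*
 * For short, non-sleeping lookups the RCU variant avoids the refcount
 * round trip; a hedged sketch (illustrative only):
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		... use dev, but do not sleep or cache the pointer ...
 *	rcu_read_unlock();
 */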
871
872 /**
873  *      dev_get_by_napi_id - find a device by napi_id
874  *      @napi_id: ID of the NAPI struct
875  *
 876  *      Search for an interface by NAPI ID. Returns a pointer to the device,
 877  *      or %NULL if the device is not found. The device has not had
878  *      its reference counter increased so the caller must be careful
879  *      about locking. The caller must hold RCU lock.
880  */
881
882 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
883 {
884         struct napi_struct *napi;
885
886         WARN_ON_ONCE(!rcu_read_lock_held());
887
888         if (napi_id < MIN_NAPI_ID)
889                 return NULL;
890
891         napi = napi_by_id(napi_id);
892
893         return napi ? napi->dev : NULL;
894 }
895 EXPORT_SYMBOL(dev_get_by_napi_id);
896
897 /**
898  *      netdev_get_name - get a netdevice name, knowing its ifindex.
899  *      @net: network namespace
900  *      @name: a pointer to the buffer where the name will be stored.
901  *      @ifindex: the ifindex of the interface to get the name from.
902  *
903  *      The use of raw_seqcount_begin() and cond_resched() before
904  *      retrying is required as we want to give the writers a chance
905  *      to complete when CONFIG_PREEMPT is not set.
906  */
907 int netdev_get_name(struct net *net, char *name, int ifindex)
908 {
909         struct net_device *dev;
910         unsigned int seq;
911
912 retry:
913         seq = raw_seqcount_begin(&devnet_rename_seq);
914         rcu_read_lock();
915         dev = dev_get_by_index_rcu(net, ifindex);
916         if (!dev) {
917                 rcu_read_unlock();
918                 return -ENODEV;
919         }
920
921         strcpy(name, dev->name);
922         rcu_read_unlock();
923         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
924                 cond_resched();
925                 goto retry;
926         }
927
928         return 0;
929 }
930
931 /**
932  *      dev_getbyhwaddr_rcu - find a device by its hardware address
933  *      @net: the applicable net namespace
934  *      @type: media type of device
935  *      @ha: hardware address
936  *
 937  *      Search for an interface by MAC address. Returns a pointer to the
 938  *      device, or NULL if the device is not found.
 939  *      The caller must hold RCU or RTNL.
 940  *      The returned device has not had its ref count increased
 941  *      and the caller must therefore be careful about locking.
942  *
943  */
944
945 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
946                                        const char *ha)
947 {
948         struct net_device *dev;
949
950         for_each_netdev_rcu(net, dev)
951                 if (dev->type == type &&
952                     !memcmp(dev->dev_addr, ha, dev->addr_len))
953                         return dev;
954
955         return NULL;
956 }
957 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
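
/*
 * A hedged lookup sketch by hardware address (mac[] is an assumed
 * caller-provided buffer of dev->addr_len bytes, illustrative only):
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		... dev is only guaranteed valid inside this RCU section ...
 *	rcu_read_unlock();
 */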
958
959 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
960 {
961         struct net_device *dev;
962
963         ASSERT_RTNL();
964         for_each_netdev(net, dev)
965                 if (dev->type == type)
966                         return dev;
967
968         return NULL;
969 }
970 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
971
972 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
973 {
974         struct net_device *dev, *ret = NULL;
975
976         rcu_read_lock();
977         for_each_netdev_rcu(net, dev)
978                 if (dev->type == type) {
979                         dev_hold(dev);
980                         ret = dev;
981                         break;
982                 }
983         rcu_read_unlock();
984         return ret;
985 }
986 EXPORT_SYMBOL(dev_getfirstbyhwtype);
987
988 /**
989  *      __dev_get_by_flags - find any device with given flags
990  *      @net: the applicable net namespace
991  *      @if_flags: IFF_* values
992  *      @mask: bitmask of bits in if_flags to check
993  *
 994  *      Search for any interface with the given flags. Returns a pointer to
 995  *      the device, or NULL if no such device is found. Must be called inside
996  *      rtnl_lock(), and result refcount is unchanged.
997  */
998
999 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1000                                       unsigned short mask)
1001 {
1002         struct net_device *dev, *ret;
1003
1004         ASSERT_RTNL();
1005
1006         ret = NULL;
1007         for_each_netdev(net, dev) {
1008                 if (((dev->flags ^ if_flags) & mask) == 0) {
1009                         ret = dev;
1010                         break;
1011                 }
1012         }
1013         return ret;
1014 }
1015 EXPORT_SYMBOL(__dev_get_by_flags);
1016
1017 /**
1018  *      dev_valid_name - check if name is okay for network device
1019  *      @name: name string
1020  *
 1021  *      Network device names need to be valid file names
 1022  *      to allow sysfs to work.  We also disallow any kind of
1023  *      whitespace.
1024  */
1025 bool dev_valid_name(const char *name)
1026 {
1027         if (*name == '\0')
1028                 return false;
1029         if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1030                 return false;
1031         if (!strcmp(name, ".") || !strcmp(name, ".."))
1032                 return false;
1033
1034         while (*name) {
1035                 if (*name == '/' || *name == ':' || isspace(*name))
1036                         return false;
1037                 name++;
1038         }
1039         return true;
1040 }
1041 EXPORT_SYMBOL(dev_valid_name);
1042
1043 /**
1044  *      __dev_alloc_name - allocate a name for a device
1045  *      @net: network namespace to allocate the device name in
1046  *      @name: name format string
1047  *      @buf:  scratch buffer and result name string
1048  *
 1049  *      Passed a format string - eg "lt%d" - it will try to find a suitable
 1050  *      id. It scans the list of devices to build up a free map, then chooses
1051  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1052  *      while allocating the name and adding the device in order to avoid
1053  *      duplicates.
1054  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055  *      Returns the number of the unit assigned or a negative errno code.
1056  */
1057
1058 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1059 {
1060         int i = 0;
1061         const char *p;
1062         const int max_netdevices = 8*PAGE_SIZE;
1063         unsigned long *inuse;
1064         struct net_device *d;
1065
1066         if (!dev_valid_name(name))
1067                 return -EINVAL;
1068
1069         p = strchr(name, '%');
1070         if (p) {
1071                 /*
1072                  * Verify the string as this thing may have come from
1073                  * the user.  There must be either one "%d" and no other "%"
1074                  * characters.
1075                  */
1076                 if (p[1] != 'd' || strchr(p + 2, '%'))
1077                         return -EINVAL;
1078
1079                 /* Use one page as a bit array of possible slots */
1080                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1081                 if (!inuse)
1082                         return -ENOMEM;
1083
1084                 for_each_netdev(net, d) {
1085                         if (!sscanf(d->name, name, &i))
1086                                 continue;
1087                         if (i < 0 || i >= max_netdevices)
1088                                 continue;
1089
1090                         /*  avoid cases where sscanf is not exact inverse of printf */
1091                         snprintf(buf, IFNAMSIZ, name, i);
1092                         if (!strncmp(buf, d->name, IFNAMSIZ))
1093                                 set_bit(i, inuse);
1094                 }
1095
1096                 i = find_first_zero_bit(inuse, max_netdevices);
1097                 free_page((unsigned long) inuse);
1098         }
1099
1100         snprintf(buf, IFNAMSIZ, name, i);
1101         if (!__dev_get_by_name(net, buf))
1102                 return i;
1103
1104         /* It is possible to run out of possible slots
1105          * when the name is long and there isn't enough space left
1106          * for the digits, or if all bits are used.
1107          */
1108         return -ENFILE;
1109 }
1110
1111 static int dev_alloc_name_ns(struct net *net,
1112                              struct net_device *dev,
1113                              const char *name)
1114 {
1115         char buf[IFNAMSIZ];
1116         int ret;
1117
1118         BUG_ON(!net);
1119         ret = __dev_alloc_name(net, name, buf);
1120         if (ret >= 0)
1121                 strlcpy(dev->name, buf, IFNAMSIZ);
1122         return ret;
1123 }
1124
1125 /**
1126  *      dev_alloc_name - allocate a name for a device
1127  *      @dev: device
1128  *      @name: name format string
1129  *
 1130  *      Passed a format string - eg "lt%d" - it will try to find a suitable
 1131  *      id. It scans the list of devices to build up a free map, then chooses
1132  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1133  *      while allocating the name and adding the device in order to avoid
1134  *      duplicates.
1135  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1136  *      Returns the number of the unit assigned or a negative errno code.
1137  */
1138
1139 int dev_alloc_name(struct net_device *dev, const char *name)
1140 {
1141         return dev_alloc_name_ns(dev_net(dev), dev, name);
1142 }
1143 EXPORT_SYMBOL(dev_alloc_name);
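
/*
 * Typical driver-side usage sketch (illustrative only): request the
 * first free "foo%d" slot before registering the device; "foo%d" is a
 * made-up format string.
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out_free;
 *
 * On success dev->name is e.g. "foo0" and err is the unit number.
 */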
1144
1145 int dev_get_valid_name(struct net *net, struct net_device *dev,
1146                        const char *name)
1147 {
1148         BUG_ON(!net);
1149
1150         if (!dev_valid_name(name))
1151                 return -EINVAL;
1152
1153         if (strchr(name, '%'))
1154                 return dev_alloc_name_ns(net, dev, name);
1155         else if (__dev_get_by_name(net, name))
1156                 return -EEXIST;
1157         else if (dev->name != name)
1158                 strlcpy(dev->name, name, IFNAMSIZ);
1159
1160         return 0;
1161 }
1162 EXPORT_SYMBOL(dev_get_valid_name);
1163
1164 /**
1165  *      dev_change_name - change name of a device
1166  *      @dev: device
1167  *      @newname: name (or format string) must be at least IFNAMSIZ
1168  *
 1169  *      Change the name of a device. Format strings such as "eth%d"
 1170  *      can be passed for wildcarding.
1171  */
1172 int dev_change_name(struct net_device *dev, const char *newname)
1173 {
1174         unsigned char old_assign_type;
1175         char oldname[IFNAMSIZ];
1176         int err = 0;
1177         int ret;
1178         struct net *net;
1179
1180         ASSERT_RTNL();
1181         BUG_ON(!dev_net(dev));
1182
1183         net = dev_net(dev);
1184         if (dev->flags & IFF_UP)
1185                 return -EBUSY;
1186
1187         write_seqcount_begin(&devnet_rename_seq);
1188
1189         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1190                 write_seqcount_end(&devnet_rename_seq);
1191                 return 0;
1192         }
1193
1194         memcpy(oldname, dev->name, IFNAMSIZ);
1195
1196         err = dev_get_valid_name(net, dev, newname);
1197         if (err < 0) {
1198                 write_seqcount_end(&devnet_rename_seq);
1199                 return err;
1200         }
1201
1202         if (oldname[0] && !strchr(oldname, '%'))
1203                 netdev_info(dev, "renamed from %s\n", oldname);
1204
1205         old_assign_type = dev->name_assign_type;
1206         dev->name_assign_type = NET_NAME_RENAMED;
1207
1208 rollback:
1209         ret = device_rename(&dev->dev, dev->name);
1210         if (ret) {
1211                 memcpy(dev->name, oldname, IFNAMSIZ);
1212                 dev->name_assign_type = old_assign_type;
1213                 write_seqcount_end(&devnet_rename_seq);
1214                 return ret;
1215         }
1216
1217         write_seqcount_end(&devnet_rename_seq);
1218
1219         netdev_adjacent_rename_links(dev, oldname);
1220
1221         write_lock_bh(&dev_base_lock);
1222         hlist_del_rcu(&dev->name_hlist);
1223         write_unlock_bh(&dev_base_lock);
1224
1225         synchronize_rcu();
1226
1227         write_lock_bh(&dev_base_lock);
1228         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1229         write_unlock_bh(&dev_base_lock);
1230
1231         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1232         ret = notifier_to_errno(ret);
1233
1234         if (ret) {
1235                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1236                 if (err >= 0) {
1237                         err = ret;
1238                         write_seqcount_begin(&devnet_rename_seq);
1239                         memcpy(dev->name, oldname, IFNAMSIZ);
1240                         memcpy(oldname, newname, IFNAMSIZ);
1241                         dev->name_assign_type = old_assign_type;
1242                         old_assign_type = NET_NAME_RENAMED;
1243                         goto rollback;
1244                 } else {
1245                         pr_err("%s: name change rollback failed: %d\n",
1246                                dev->name, ret);
1247                 }
1248         }
1249
1250         return err;
1251 }
1252
1253 /**
1254  *      dev_set_alias - change ifalias of a device
1255  *      @dev: device
1256  *      @alias: name up to IFALIASZ
1257  *      @len: limit of bytes to copy from info
1258  *
 1259  *      Set ifalias for a device.
1260  */
1261 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1262 {
1263         struct dev_ifalias *new_alias = NULL;
1264
1265         if (len >= IFALIASZ)
1266                 return -EINVAL;
1267
1268         if (len) {
1269                 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1270                 if (!new_alias)
1271                         return -ENOMEM;
1272
1273                 memcpy(new_alias->ifalias, alias, len);
1274                 new_alias->ifalias[len] = 0;
1275         }
1276
1277         mutex_lock(&ifalias_mutex);
1278         rcu_swap_protected(dev->ifalias, new_alias,
1279                            mutex_is_locked(&ifalias_mutex));
1280         mutex_unlock(&ifalias_mutex);
1281
1282         if (new_alias)
1283                 kfree_rcu(new_alias, rcuhead);
1284
1285         return len;
1286 }
1287 EXPORT_SYMBOL(dev_set_alias);
1288
1289 /**
1290  *      dev_get_alias - get ifalias of a device
1291  *      @dev: device
1292  *      @name: buffer to store name of ifalias
1293  *      @len: size of buffer
1294  *
 1295  *      Get ifalias for a device.  The caller must make sure dev cannot go
 1296  *      away, e.g. by holding the RCU read lock or a reference to the device.
1297  */
1298 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1299 {
1300         const struct dev_ifalias *alias;
1301         int ret = 0;
1302
1303         rcu_read_lock();
1304         alias = rcu_dereference(dev->ifalias);
1305         if (alias)
1306                 ret = snprintf(name, len, "%s", alias->ifalias);
1307         rcu_read_unlock();
1308
1309         return ret;
1310 }
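
/*
 * A small usage sketch for the two ifalias helpers above; the alias
 * string is an illustrative assumption.
 *
 *	static const char alias[] = "uplink to core switch";
 *	char buf[IFALIASZ];
 *
 *	dev_set_alias(dev, alias, strlen(alias));
 *	dev_get_alias(dev, buf, sizeof(buf));
 */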
1311
1312 /**
1313  *      netdev_features_change - device changes features
1314  *      @dev: device to cause notification
1315  *
1316  *      Called to indicate a device has changed features.
1317  */
1318 void netdev_features_change(struct net_device *dev)
1319 {
1320         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1321 }
1322 EXPORT_SYMBOL(netdev_features_change);
1323
1324 /**
1325  *      netdev_state_change - device changes state
1326  *      @dev: device to cause notification
1327  *
1328  *      Called to indicate a device has changed state. This function calls
1329  *      the notifier chains for netdev_chain and sends a NEWLINK message
1330  *      to the routing socket.
1331  */
1332 void netdev_state_change(struct net_device *dev)
1333 {
1334         if (dev->flags & IFF_UP) {
1335                 struct netdev_notifier_change_info change_info = {
1336                         .info.dev = dev,
1337                 };
1338
1339                 call_netdevice_notifiers_info(NETDEV_CHANGE,
1340                                               &change_info.info);
1341                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1342         }
1343 }
1344 EXPORT_SYMBOL(netdev_state_change);
1345
1346 /**
1347  * netdev_notify_peers - notify network peers about existence of @dev
1348  * @dev: network device
1349  *
1350  * Generate traffic such that interested network peers are aware of
1351  * @dev, such as by generating a gratuitous ARP. This may be used when
1352  * a device wants to inform the rest of the network about some sort of
1353  * reconfiguration such as a failover event or virtual machine
1354  * migration.
1355  */
1356 void netdev_notify_peers(struct net_device *dev)
1357 {
1358         rtnl_lock();
1359         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1360         call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1361         rtnl_unlock();
1362 }
1363 EXPORT_SYMBOL(netdev_notify_peers);
1364
1365 static int __dev_open(struct net_device *dev)
1366 {
1367         const struct net_device_ops *ops = dev->netdev_ops;
1368         int ret;
1369
1370         ASSERT_RTNL();
1371
1372         if (!netif_device_present(dev))
1373                 return -ENODEV;
1374
1375         /* Block netpoll from trying to do any rx path servicing.
1376          * If we don't do this there is a chance ndo_poll_controller
1377          * or ndo_poll may be running while we open the device
1378          */
1379         netpoll_poll_disable(dev);
1380
1381         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1382         ret = notifier_to_errno(ret);
1383         if (ret)
1384                 return ret;
1385
1386         set_bit(__LINK_STATE_START, &dev->state);
1387
1388         if (ops->ndo_validate_addr)
1389                 ret = ops->ndo_validate_addr(dev);
1390
1391         if (!ret && ops->ndo_open)
1392                 ret = ops->ndo_open(dev);
1393
1394         netpoll_poll_enable(dev);
1395
1396         if (ret)
1397                 clear_bit(__LINK_STATE_START, &dev->state);
1398         else {
1399                 dev->flags |= IFF_UP;
1400                 dev_set_rx_mode(dev);
1401                 dev_activate(dev);
1402                 add_device_randomness(dev->dev_addr, dev->addr_len);
1403         }
1404
1405         return ret;
1406 }
1407
1408 /**
1409  *      dev_open        - prepare an interface for use.
1410  *      @dev:   device to open
1411  *
1412  *      Takes a device from down to up state. The device's private open
1413  *      function is invoked and then the multicast lists are loaded. Finally
1414  *      the device is moved into the up state and a %NETDEV_UP message is
1415  *      sent to the netdev notifier chain.
1416  *
1417  *      Calling this function on an active interface is a nop. On a failure
1418  *      a negative errno code is returned.
1419  */
1420 int dev_open(struct net_device *dev)
1421 {
1422         int ret;
1423
1424         if (dev->flags & IFF_UP)
1425                 return 0;
1426
1427         ret = __dev_open(dev);
1428         if (ret < 0)
1429                 return ret;
1430
1431         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1432         call_netdevice_notifiers(NETDEV_UP, dev);
1433
1434         return ret;
1435 }
1436 EXPORT_SYMBOL(dev_open);
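
/*
 * Both dev_open() and dev_close() expect the RTNL lock to be held by
 * the caller; a hedged sketch of bringing an interface up from kernel
 * code (illustrative only):
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */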
1437
1438 static void __dev_close_many(struct list_head *head)
1439 {
1440         struct net_device *dev;
1441
1442         ASSERT_RTNL();
1443         might_sleep();
1444
1445         list_for_each_entry(dev, head, close_list) {
1446                 /* Temporarily disable netpoll until the interface is down */
1447                 netpoll_poll_disable(dev);
1448
1449                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1450
1451                 clear_bit(__LINK_STATE_START, &dev->state);
1452
1453                 /* Synchronize to scheduled poll. We cannot touch poll list, it
1454                  * can be even on different cpu. So just clear netif_running().
1455                  *
 1456                  * dev->stop() will invoke napi_disable() on all of its
1457                  * napi_struct instances on this device.
1458                  */
1459                 smp_mb__after_atomic(); /* Commit netif_running(). */
1460         }
1461
1462         dev_deactivate_many(head);
1463
1464         list_for_each_entry(dev, head, close_list) {
1465                 const struct net_device_ops *ops = dev->netdev_ops;
1466
1467                 /*
 1468                  *      Call the device specific close. This cannot fail
 1469                  *      and is only done if the device is UP.
1470                  *
1471                  *      We allow it to be called even after a DETACH hot-plug
1472                  *      event.
1473                  */
1474                 if (ops->ndo_stop)
1475                         ops->ndo_stop(dev);
1476
1477                 dev->flags &= ~IFF_UP;
1478                 netpoll_poll_enable(dev);
1479         }
1480 }
1481
1482 static void __dev_close(struct net_device *dev)
1483 {
1484         LIST_HEAD(single);
1485
1486         list_add(&dev->close_list, &single);
1487         __dev_close_many(&single);
1488         list_del(&single);
1489 }
1490
1491 void dev_close_many(struct list_head *head, bool unlink)
1492 {
1493         struct net_device *dev, *tmp;
1494
1495         /* Remove the devices that don't need to be closed */
1496         list_for_each_entry_safe(dev, tmp, head, close_list)
1497                 if (!(dev->flags & IFF_UP))
1498                         list_del_init(&dev->close_list);
1499
1500         __dev_close_many(head);
1501
1502         list_for_each_entry_safe(dev, tmp, head, close_list) {
1503                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1504                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1505                 if (unlink)
1506                         list_del_init(&dev->close_list);
1507         }
1508 }
1509 EXPORT_SYMBOL(dev_close_many);
1510
1511 /**
1512  *      dev_close - shutdown an interface.
1513  *      @dev: device to shutdown
1514  *
1515  *      This function moves an active device into down state. A
1516  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1517  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1518  *      chain.
1519  */
1520 void dev_close(struct net_device *dev)
1521 {
1522         if (dev->flags & IFF_UP) {
1523                 LIST_HEAD(single);
1524
1525                 list_add(&dev->close_list, &single);
1526                 dev_close_many(&single, true);
1527                 list_del(&single);
1528         }
1529 }
1530 EXPORT_SYMBOL(dev_close);
1531
1532
1533 /**
1534  *      dev_disable_lro - disable Large Receive Offload on a device
1535  *      @dev: device
1536  *
1537  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1538  *      called under RTNL.  This is needed if received packets may be
1539  *      forwarded to another interface.
1540  */
1541 void dev_disable_lro(struct net_device *dev)
1542 {
1543         struct net_device *lower_dev;
1544         struct list_head *iter;
1545
1546         dev->wanted_features &= ~NETIF_F_LRO;
1547         netdev_update_features(dev);
1548
1549         if (unlikely(dev->features & NETIF_F_LRO))
1550                 netdev_WARN(dev, "failed to disable LRO!\n");
1551
1552         netdev_for_each_lower_dev(dev, lower_dev, iter)
1553                 dev_disable_lro(lower_dev);
1554 }
1555 EXPORT_SYMBOL(dev_disable_lro);
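
/* Illustrative sketch (not part of this file): since received packets may be
 * forwarded, a hypothetical setup path enabling forwarding on "example_dev"
 * might turn LRO off first; as noted above, the call must run under RTNL.
 *
 *	rtnl_lock();
 *	dev_disable_lro(example_dev);
 *	rtnl_unlock();
 */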
1556
1557 /**
1558  *      dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1559  *      @dev: device
1560  *
1561  *      Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1562  *      called under RTNL.  This is needed if Generic XDP is installed on
1563  *      the device.
1564  */
1565 static void dev_disable_gro_hw(struct net_device *dev)
1566 {
1567         dev->wanted_features &= ~NETIF_F_GRO_HW;
1568         netdev_update_features(dev);
1569
1570         if (unlikely(dev->features & NETIF_F_GRO_HW))
1571                 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1572 }
1573
1574 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1575 {
1576 #define N(val)                                          \
1577         case NETDEV_##val:                              \
1578                 return "NETDEV_" __stringify(val);
1579         switch (cmd) {
1580         N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1581         N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1582         N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1583         N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1584         N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1585         N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1586         N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1587         N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1588         N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1589         }
1590 #undef N
1591         return "UNKNOWN_NETDEV_EVENT";
1592 }
1593 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1594
1595 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1596                                    struct net_device *dev)
1597 {
1598         struct netdev_notifier_info info = {
1599                 .dev = dev,
1600         };
1601
1602         return nb->notifier_call(nb, val, &info);
1603 }
1604
1605 static int dev_boot_phase = 1;
1606
1607 /**
1608  * register_netdevice_notifier - register a network notifier block
1609  * @nb: notifier
1610  *
1611  * Register a notifier to be called when network device events occur.
1612  * The notifier passed is linked into the kernel structures and must
1613  * not be reused until it has been unregistered. A negative errno code
1614  * is returned on a failure.
1615  *
1616  * When registered, all registration and up events are replayed
1617  * to the new notifier to allow it to have a race-free
1618  * view of the network device list.
1619  */
1620
1621 int register_netdevice_notifier(struct notifier_block *nb)
1622 {
1623         struct net_device *dev;
1624         struct net_device *last;
1625         struct net *net;
1626         int err;
1627
1628         /* Close race with setup_net() and cleanup_net() */
1629         down_write(&pernet_ops_rwsem);
1630         rtnl_lock();
1631         err = raw_notifier_chain_register(&netdev_chain, nb);
1632         if (err)
1633                 goto unlock;
1634         if (dev_boot_phase)
1635                 goto unlock;
1636         for_each_net(net) {
1637                 for_each_netdev(net, dev) {
1638                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1639                         err = notifier_to_errno(err);
1640                         if (err)
1641                                 goto rollback;
1642
1643                         if (!(dev->flags & IFF_UP))
1644                                 continue;
1645
1646                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1647                 }
1648         }
1649
1650 unlock:
1651         rtnl_unlock();
1652         up_write(&pernet_ops_rwsem);
1653         return err;
1654
1655 rollback:
1656         last = dev;
1657         for_each_net(net) {
1658                 for_each_netdev(net, dev) {
1659                         if (dev == last)
1660                                 goto outroll;
1661
1662                         if (dev->flags & IFF_UP) {
1663                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1664                                                         dev);
1665                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1666                         }
1667                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1668                 }
1669         }
1670
1671 outroll:
1672         raw_notifier_chain_unregister(&netdev_chain, nb);
1673         goto unlock;
1674 }
1675 EXPORT_SYMBOL(register_netdevice_notifier);
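
/* Illustrative sketch (not part of this file): a minimal notifier as a
 * hypothetical module might register it. All "example_*" names are
 * assumptions; netdev_notifier_info_to_dev() recovers the net_device from
 * the notifier payload.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_GOING_DOWN:
 *			pr_info("%s is going down\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	err = register_netdevice_notifier(&example_netdev_nb);
 *	...
 *	unregister_netdevice_notifier(&example_netdev_nb);
 */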
1676
1677 /**
1678  * unregister_netdevice_notifier - unregister a network notifier block
1679  * @nb: notifier
1680  *
1681  * Unregister a notifier previously registered by
1682  * register_netdevice_notifier(). The notifier is unlinked from the
1683  * kernel structures and may then be reused. A negative errno code
1684  * is returned on a failure.
1685  *
1686  * After unregistering, unregister and down device events are synthesized
1687  * for all devices on the device list and sent to the removed notifier,
1688  * removing the need for special-case cleanup code.
1689  */
1690
1691 int unregister_netdevice_notifier(struct notifier_block *nb)
1692 {
1693         struct net_device *dev;
1694         struct net *net;
1695         int err;
1696
1697         /* Close race with setup_net() and cleanup_net() */
1698         down_write(&pernet_ops_rwsem);
1699         rtnl_lock();
1700         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1701         if (err)
1702                 goto unlock;
1703
1704         for_each_net(net) {
1705                 for_each_netdev(net, dev) {
1706                         if (dev->flags & IFF_UP) {
1707                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1708                                                         dev);
1709                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1710                         }
1711                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1712                 }
1713         }
1714 unlock:
1715         rtnl_unlock();
1716         up_write(&pernet_ops_rwsem);
1717         return err;
1718 }
1719 EXPORT_SYMBOL(unregister_netdevice_notifier);
1720
1721 /**
1722  *      call_netdevice_notifiers_info - call all network notifier blocks
1723  *      @val: value passed unmodified to notifier function
1724  *      @info: notifier information data
1725  *
1726  *      Call all network notifier blocks.  Parameters and return value
1727  *      are as for raw_notifier_call_chain().
1728  */
1729
1730 static int call_netdevice_notifiers_info(unsigned long val,
1731                                          struct netdev_notifier_info *info)
1732 {
1733         ASSERT_RTNL();
1734         return raw_notifier_call_chain(&netdev_chain, val, info);
1735 }
1736
1737 /**
1738  *      call_netdevice_notifiers - call all network notifier blocks
1739  *      @val: value passed unmodified to notifier function
1740  *      @dev: net_device pointer passed unmodified to notifier function
1741  *
1742  *      Call all network notifier blocks.  Parameters and return value
1743  *      are as for raw_notifier_call_chain().
1744  */
1745
1746 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1747 {
1748         struct netdev_notifier_info info = {
1749                 .dev = dev,
1750         };
1751
1752         return call_netdevice_notifiers_info(val, &info);
1753 }
1754 EXPORT_SYMBOL(call_netdevice_notifiers);
1755
1756 #ifdef CONFIG_NET_INGRESS
1757 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
1758
1759 void net_inc_ingress_queue(void)
1760 {
1761         static_branch_inc(&ingress_needed_key);
1762 }
1763 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1764
1765 void net_dec_ingress_queue(void)
1766 {
1767         static_branch_dec(&ingress_needed_key);
1768 }
1769 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1770 #endif
1771
1772 #ifdef CONFIG_NET_EGRESS
1773 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1774
1775 void net_inc_egress_queue(void)
1776 {
1777         static_branch_inc(&egress_needed_key);
1778 }
1779 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1780
1781 void net_dec_egress_queue(void)
1782 {
1783         static_branch_dec(&egress_needed_key);
1784 }
1785 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1786 #endif
1787
1788 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
1789 #ifdef HAVE_JUMP_LABEL
1790 static atomic_t netstamp_needed_deferred;
1791 static atomic_t netstamp_wanted;
1792 static void netstamp_clear(struct work_struct *work)
1793 {
1794         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1795         int wanted;
1796
1797         wanted = atomic_add_return(deferred, &netstamp_wanted);
1798         if (wanted > 0)
1799                 static_branch_enable(&netstamp_needed_key);
1800         else
1801                 static_branch_disable(&netstamp_needed_key);
1802 }
1803 static DECLARE_WORK(netstamp_work, netstamp_clear);
1804 #endif
1805
1806 void net_enable_timestamp(void)
1807 {
1808 #ifdef HAVE_JUMP_LABEL
1809         int wanted;
1810
1811         while (1) {
1812                 wanted = atomic_read(&netstamp_wanted);
1813                 if (wanted <= 0)
1814                         break;
1815                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1816                         return;
1817         }
1818         atomic_inc(&netstamp_needed_deferred);
1819         schedule_work(&netstamp_work);
1820 #else
1821         static_branch_inc(&netstamp_needed_key);
1822 #endif
1823 }
1824 EXPORT_SYMBOL(net_enable_timestamp);
1825
1826 void net_disable_timestamp(void)
1827 {
1828 #ifdef HAVE_JUMP_LABEL
1829         int wanted;
1830
1831         while (1) {
1832                 wanted = atomic_read(&netstamp_wanted);
1833                 if (wanted <= 1)
1834                         break;
1835                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1836                         return;
1837         }
1838         atomic_dec(&netstamp_needed_deferred);
1839         schedule_work(&netstamp_work);
1840 #else
1841         static_branch_dec(&netstamp_needed_key);
1842 #endif
1843 }
1844 EXPORT_SYMBOL(net_disable_timestamp);
1845
1846 static inline void net_timestamp_set(struct sk_buff *skb)
1847 {
1848         skb->tstamp = 0;
1849         if (static_branch_unlikely(&netstamp_needed_key))
1850                 __net_timestamp(skb);
1851 }
1852
1853 #define net_timestamp_check(COND, SKB)                          \
1854         if (static_branch_unlikely(&netstamp_needed_key)) {     \
1855                 if ((COND) && !(SKB)->tstamp)                   \
1856                         __net_timestamp(SKB);                   \
1857         }                                                       \
1858
1859 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
1860 {
1861         unsigned int len;
1862
1863         if (!(dev->flags & IFF_UP))
1864                 return false;
1865
1866         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1867         if (skb->len <= len)
1868                 return true;
1869
1870         /* if TSO is enabled, we don't care about the length as the packet
1871          * could be forwarded without being segmented first
1872          */
1873         if (skb_is_gso(skb))
1874                 return true;
1875
1876         return false;
1877 }
1878 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1879
1880 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1881 {
1882         int ret = ____dev_forward_skb(dev, skb);
1883
1884         if (likely(!ret)) {
1885                 skb->protocol = eth_type_trans(skb, dev);
1886                 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1887         }
1888
1889         return ret;
1890 }
1891 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1892
1893 /**
1894  * dev_forward_skb - loopback an skb to another netif
1895  *
1896  * @dev: destination network device
1897  * @skb: buffer to forward
1898  *
1899  * return values:
1900  *      NET_RX_SUCCESS  (no congestion)
1901  *      NET_RX_DROP     (packet was dropped, but freed)
1902  *
1903  * dev_forward_skb can be used for injecting an skb from the
1904  * start_xmit function of one device into the receive queue
1905  * of another device.
1906  *
1907  * The receiving device may be in another namespace, so
1908  * we have to clear all information in the skb that could
1909  * impact namespace isolation.
1910  */
1911 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1912 {
1913         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1914 }
1915 EXPORT_SYMBOL_GPL(dev_forward_skb);
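
/* Illustrative sketch (not part of this file): a virtual driver's
 * ndo_start_xmit() could hand frames to a paired device's receive path,
 * roughly the way veth-style drivers do. "example_get_peer()" is an assumed
 * helper returning the peer net_device; dev_forward_skb() consumes the skb
 * in both the success and the drop case.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */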
1916
1917 static inline int deliver_skb(struct sk_buff *skb,
1918                               struct packet_type *pt_prev,
1919                               struct net_device *orig_dev)
1920 {
1921         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1922                 return -ENOMEM;
1923         refcount_inc(&skb->users);
1924         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1925 }
1926
1927 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1928                                           struct packet_type **pt,
1929                                           struct net_device *orig_dev,
1930                                           __be16 type,
1931                                           struct list_head *ptype_list)
1932 {
1933         struct packet_type *ptype, *pt_prev = *pt;
1934
1935         list_for_each_entry_rcu(ptype, ptype_list, list) {
1936                 if (ptype->type != type)
1937                         continue;
1938                 if (pt_prev)
1939                         deliver_skb(skb, pt_prev, orig_dev);
1940                 pt_prev = ptype;
1941         }
1942         *pt = pt_prev;
1943 }
1944
1945 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1946 {
1947         if (!ptype->af_packet_priv || !skb->sk)
1948                 return false;
1949
1950         if (ptype->id_match)
1951                 return ptype->id_match(ptype, skb->sk);
1952         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1953                 return true;
1954
1955         return false;
1956 }
1957
1958 /*
1959  *      Support routine. Sends outgoing frames to any network
1960  *      taps currently in use.
1961  */
1962
1963 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1964 {
1965         struct packet_type *ptype;
1966         struct sk_buff *skb2 = NULL;
1967         struct packet_type *pt_prev = NULL;
1968         struct list_head *ptype_list = &ptype_all;
1969
1970         rcu_read_lock();
1971 again:
1972         list_for_each_entry_rcu(ptype, ptype_list, list) {
1973                 /* Never send packets back to the socket
1974                  * they originated from - MvS (miquels@drinkel.ow.org)
1975                  */
1976                 if (skb_loop_sk(ptype, skb))
1977                         continue;
1978
1979                 if (pt_prev) {
1980                         deliver_skb(skb2, pt_prev, skb->dev);
1981                         pt_prev = ptype;
1982                         continue;
1983                 }
1984
1985                 /* need to clone skb, done only once */
1986                 skb2 = skb_clone(skb, GFP_ATOMIC);
1987                 if (!skb2)
1988                         goto out_unlock;
1989
1990                 net_timestamp_set(skb2);
1991
1992                 /* skb->nh should be correctly
1993                  * set by sender, so that the second statement is
1994                  * just protection against buggy protocols.
1995                  */
1996                 skb_reset_mac_header(skb2);
1997
1998                 if (skb_network_header(skb2) < skb2->data ||
1999                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2000                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2001                                              ntohs(skb2->protocol),
2002                                              dev->name);
2003                         skb_reset_network_header(skb2);
2004                 }
2005
2006                 skb2->transport_header = skb2->network_header;
2007                 skb2->pkt_type = PACKET_OUTGOING;
2008                 pt_prev = ptype;
2009         }
2010
2011         if (ptype_list == &ptype_all) {
2012                 ptype_list = &dev->ptype_all;
2013                 goto again;
2014         }
2015 out_unlock:
2016         if (pt_prev) {
2017                 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2018                         pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2019                 else
2020                         kfree_skb(skb2);
2021         }
2022         rcu_read_unlock();
2023 }
2024 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2025
2026 /**
2027  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2028  * @dev: Network device
2029  * @txq: number of queues available
2030  *
2031  * If real_num_tx_queues is changed the tc mappings may no longer be
2032  * valid. To resolve this, verify the tc mapping remains valid and, if
2033  * not, zero the mapping. With no priorities mapping to this
2034  * offset/count pair it will no longer be used. In the worst case, if
2035  * TC0 is invalid, nothing can be done, so priority mappings are disabled.
2036  * It is expected that drivers will fix this mapping if they can before
2037  * calling netif_set_real_num_tx_queues.
2038  */
2039 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2040 {
2041         int i;
2042         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2043
2044         /* If TC0 is invalidated disable TC mapping */
2045         if (tc->offset + tc->count > txq) {
2046                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2047                 dev->num_tc = 0;
2048                 return;
2049         }
2050
2051         /* Invalidated prio to tc mappings set to TC0 */
2052         for (i = 1; i < TC_BITMASK + 1; i++) {
2053                 int q = netdev_get_prio_tc_map(dev, i);
2054
2055                 tc = &dev->tc_to_txq[q];
2056                 if (tc->offset + tc->count > txq) {
2057                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2058                                 i, q);
2059                         netdev_set_prio_tc_map(dev, i, 0);
2060                 }
2061         }
2062 }
2063
2064 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2065 {
2066         if (dev->num_tc) {
2067                 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2068                 int i;
2069
2070                 /* walk through the TCs and see if it falls into any of them */
2071                 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2072                         if ((txq - tc->offset) < tc->count)
2073                                 return i;
2074                 }
2075
2076                 /* didn't find it, just return -1 to indicate no match */
2077                 return -1;
2078         }
2079
2080         return 0;
2081 }
2082 EXPORT_SYMBOL(netdev_txq_to_tc);
2083
2084 #ifdef CONFIG_XPS
2085 struct static_key xps_needed __read_mostly;
2086 EXPORT_SYMBOL(xps_needed);
2087 struct static_key xps_rxqs_needed __read_mostly;
2088 EXPORT_SYMBOL(xps_rxqs_needed);
2089 static DEFINE_MUTEX(xps_map_mutex);
2090 #define xmap_dereference(P)             \
2091         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2092
2093 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2094                              int tci, u16 index)
2095 {
2096         struct xps_map *map = NULL;
2097         int pos;
2098
2099         if (dev_maps)
2100                 map = xmap_dereference(dev_maps->attr_map[tci]);
2101         if (!map)
2102                 return false;
2103
2104         for (pos = map->len; pos--;) {
2105                 if (map->queues[pos] != index)
2106                         continue;
2107
2108                 if (map->len > 1) {
2109                         map->queues[pos] = map->queues[--map->len];
2110                         break;
2111                 }
2112
2113                 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2114                 kfree_rcu(map, rcu);
2115                 return false;
2116         }
2117
2118         return true;
2119 }
2120
2121 static bool remove_xps_queue_cpu(struct net_device *dev,
2122                                  struct xps_dev_maps *dev_maps,
2123                                  int cpu, u16 offset, u16 count)
2124 {
2125         int num_tc = dev->num_tc ? : 1;
2126         bool active = false;
2127         int tci;
2128
2129         for (tci = cpu * num_tc; num_tc--; tci++) {
2130                 int i, j;
2131
2132                 for (i = count, j = offset; i--; j++) {
2133                         if (!remove_xps_queue(dev_maps, tci, j))
2134                                 break;
2135                 }
2136
2137                 active |= i < 0;
2138         }
2139
2140         return active;
2141 }
2142
2143 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2144                            struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2145                            u16 offset, u16 count, bool is_rxqs_map)
2146 {
2147         bool active = false;
2148         int i, j;
2149
2150         for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2151              j < nr_ids;)
2152                 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2153                                                count);
2154         if (!active) {
2155                 if (is_rxqs_map) {
2156                         RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2157                 } else {
2158                         RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2159
2160                         for (i = offset + (count - 1); count--; i--)
2161                                 netdev_queue_numa_node_write(
2162                                         netdev_get_tx_queue(dev, i),
2163                                                         NUMA_NO_NODE);
2164                 }
2165                 kfree_rcu(dev_maps, rcu);
2166         }
2167 }
2168
2169 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2170                                    u16 count)
2171 {
2172         const unsigned long *possible_mask = NULL;
2173         struct xps_dev_maps *dev_maps;
2174         unsigned int nr_ids;
2175
2176         if (!static_key_false(&xps_needed))
2177                 return;
2178
2179         mutex_lock(&xps_map_mutex);
2180
2181         if (static_key_false(&xps_rxqs_needed)) {
2182                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2183                 if (dev_maps) {
2184                         nr_ids = dev->num_rx_queues;
2185                         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2186                                        offset, count, true);
2187                 }
2188         }
2189
2190         dev_maps = xmap_dereference(dev->xps_cpus_map);
2191         if (!dev_maps)
2192                 goto out_no_maps;
2193
2194         if (num_possible_cpus() > 1)
2195                 possible_mask = cpumask_bits(cpu_possible_mask);
2196         nr_ids = nr_cpu_ids;
2197         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2198                        false);
2199
2200 out_no_maps:
2201         if (static_key_enabled(&xps_rxqs_needed))
2202                 static_key_slow_dec(&xps_rxqs_needed);
2203
2204         static_key_slow_dec(&xps_needed);
2205         mutex_unlock(&xps_map_mutex);
2206 }
2207
2208 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2209 {
2210         netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2211 }
2212
2213 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2214                                       u16 index, bool is_rxqs_map)
2215 {
2216         struct xps_map *new_map;
2217         int alloc_len = XPS_MIN_MAP_ALLOC;
2218         int i, pos;
2219
2220         for (pos = 0; map && pos < map->len; pos++) {
2221                 if (map->queues[pos] != index)
2222                         continue;
2223                 return map;
2224         }
2225
2226         /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2227         if (map) {
2228                 if (pos < map->alloc_len)
2229                         return map;
2230
2231                 alloc_len = map->alloc_len * 2;
2232         }
2233
2234         /* Need to allocate a new map to store the tx-queue on this
2235          * CPU's/rx-queue's map
2236          */
2237         if (is_rxqs_map)
2238                 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2239         else
2240                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2241                                        cpu_to_node(attr_index));
2242         if (!new_map)
2243                 return NULL;
2244
2245         for (i = 0; i < pos; i++)
2246                 new_map->queues[i] = map->queues[i];
2247         new_map->alloc_len = alloc_len;
2248         new_map->len = pos;
2249
2250         return new_map;
2251 }
2252
2253 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2254                           u16 index, bool is_rxqs_map)
2255 {
2256         const unsigned long *online_mask = NULL, *possible_mask = NULL;
2257         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2258         int i, j, tci, numa_node_id = -2;
2259         int maps_sz, num_tc = 1, tc = 0;
2260         struct xps_map *map, *new_map;
2261         bool active = false;
2262         unsigned int nr_ids;
2263
2264         if (dev->num_tc) {
2265                 /* Do not allow XPS on subordinate device directly */
2266                 num_tc = dev->num_tc;
2267                 if (num_tc < 0)
2268                         return -EINVAL;
2269
2270                 /* If queue belongs to subordinate dev use its map */
2271                 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2272
2273                 tc = netdev_txq_to_tc(dev, index);
2274                 if (tc < 0)
2275                         return -EINVAL;
2276         }
2277
2278         mutex_lock(&xps_map_mutex);
2279         if (is_rxqs_map) {
2280                 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2281                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2282                 nr_ids = dev->num_rx_queues;
2283         } else {
2284                 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2285                 if (num_possible_cpus() > 1) {
2286                         online_mask = cpumask_bits(cpu_online_mask);
2287                         possible_mask = cpumask_bits(cpu_possible_mask);
2288                 }
2289                 dev_maps = xmap_dereference(dev->xps_cpus_map);
2290                 nr_ids = nr_cpu_ids;
2291         }
2292
2293         if (maps_sz < L1_CACHE_BYTES)
2294                 maps_sz = L1_CACHE_BYTES;
2295
2296         /* allocate memory for queue storage */
2297         for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2298              j < nr_ids;) {
2299                 if (!new_dev_maps)
2300                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2301                 if (!new_dev_maps) {
2302                         mutex_unlock(&xps_map_mutex);
2303                         return -ENOMEM;
2304                 }
2305
2306                 tci = j * num_tc + tc;
2307                 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2308                                  NULL;
2309
2310                 map = expand_xps_map(map, j, index, is_rxqs_map);
2311                 if (!map)
2312                         goto error;
2313
2314                 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2315         }
2316
2317         if (!new_dev_maps)
2318                 goto out_no_new_maps;
2319
2320         static_key_slow_inc(&xps_needed);
2321         if (is_rxqs_map)
2322                 static_key_slow_inc(&xps_rxqs_needed);
2323
2324         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2325              j < nr_ids;) {
2326                 /* copy maps belonging to foreign traffic classes */
2327                 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2328                         /* fill in the new device map from the old device map */
2329                         map = xmap_dereference(dev_maps->attr_map[tci]);
2330                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2331                 }
2332
2333                 /* We need to explicitly update tci as the previous loop
2334                  * could break out early if dev_maps is NULL.
2335                  */
2336                 tci = j * num_tc + tc;
2337
2338                 if (netif_attr_test_mask(j, mask, nr_ids) &&
2339                     netif_attr_test_online(j, online_mask, nr_ids)) {
2340                         /* add tx-queue to CPU/rx-queue maps */
2341                         int pos = 0;
2342
2343                         map = xmap_dereference(new_dev_maps->attr_map[tci]);
2344                         while ((pos < map->len) && (map->queues[pos] != index))
2345                                 pos++;
2346
2347                         if (pos == map->len)
2348                                 map->queues[map->len++] = index;
2349 #ifdef CONFIG_NUMA
2350                         if (!is_rxqs_map) {
2351                                 if (numa_node_id == -2)
2352                                         numa_node_id = cpu_to_node(j);
2353                                 else if (numa_node_id != cpu_to_node(j))
2354                                         numa_node_id = -1;
2355                         }
2356 #endif
2357                 } else if (dev_maps) {
2358                         /* fill in the new device map from the old device map */
2359                         map = xmap_dereference(dev_maps->attr_map[tci]);
2360                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2361                 }
2362
2363                 /* copy maps belonging to foreign traffic classes */
2364                 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2365                         /* fill in the new device map from the old device map */
2366                         map = xmap_dereference(dev_maps->attr_map[tci]);
2367                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2368                 }
2369         }
2370
2371         if (is_rxqs_map)
2372                 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2373         else
2374                 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2375
2376         /* Cleanup old maps */
2377         if (!dev_maps)
2378                 goto out_no_old_maps;
2379
2380         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2381              j < nr_ids;) {
2382                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2383                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2384                         map = xmap_dereference(dev_maps->attr_map[tci]);
2385                         if (map && map != new_map)
2386                                 kfree_rcu(map, rcu);
2387                 }
2388         }
2389
2390         kfree_rcu(dev_maps, rcu);
2391
2392 out_no_old_maps:
2393         dev_maps = new_dev_maps;
2394         active = true;
2395
2396 out_no_new_maps:
2397         if (!is_rxqs_map) {
2398                 /* update Tx queue numa node */
2399                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2400                                              (numa_node_id >= 0) ?
2401                                              numa_node_id : NUMA_NO_NODE);
2402         }
2403
2404         if (!dev_maps)
2405                 goto out_no_maps;
2406
2407         /* removes tx-queue from unused CPUs/rx-queues */
2408         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2409              j < nr_ids;) {
2410                 for (i = tc, tci = j * num_tc; i--; tci++)
2411                         active |= remove_xps_queue(dev_maps, tci, index);
2412                 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2413                     !netif_attr_test_online(j, online_mask, nr_ids))
2414                         active |= remove_xps_queue(dev_maps, tci, index);
2415                 for (i = num_tc - tc, tci++; --i; tci++)
2416                         active |= remove_xps_queue(dev_maps, tci, index);
2417         }
2418
2419         /* free map if not active */
2420         if (!active) {
2421                 if (is_rxqs_map)
2422                         RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2423                 else
2424                         RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2425                 kfree_rcu(dev_maps, rcu);
2426         }
2427
2428 out_no_maps:
2429         mutex_unlock(&xps_map_mutex);
2430
2431         return 0;
2432 error:
2433         /* remove any maps that we added */
2434         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2435              j < nr_ids;) {
2436                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2437                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2438                         map = dev_maps ?
2439                               xmap_dereference(dev_maps->attr_map[tci]) :
2440                               NULL;
2441                         if (new_map && new_map != map)
2442                                 kfree(new_map);
2443                 }
2444         }
2445
2446         mutex_unlock(&xps_map_mutex);
2447
2448         kfree(new_dev_maps);
2449         return -ENOMEM;
2450 }
2451
2452 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2453                         u16 index)
2454 {
2455         return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2456 }
2457 EXPORT_SYMBOL(netif_set_xps_queue);
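
/* Illustrative sketch (not part of this file): a driver could pin each Tx
 * queue to the CPU expected to service it, e.g. from a queue setup loop;
 * the one-queue-per-CPU mapping here is an assumption.
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
 */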
2458
2459 #endif
2460 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2461 {
2462         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2463
2464         /* Unbind any subordinate channels */
2465         while (txq-- != &dev->_tx[0]) {
2466                 if (txq->sb_dev)
2467                         netdev_unbind_sb_channel(dev, txq->sb_dev);
2468         }
2469 }
2470
2471 void netdev_reset_tc(struct net_device *dev)
2472 {
2473 #ifdef CONFIG_XPS
2474         netif_reset_xps_queues_gt(dev, 0);
2475 #endif
2476         netdev_unbind_all_sb_channels(dev);
2477
2478         /* Reset TC configuration of device */
2479         dev->num_tc = 0;
2480         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2481         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2482 }
2483 EXPORT_SYMBOL(netdev_reset_tc);
2484
2485 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2486 {
2487         if (tc >= dev->num_tc)
2488                 return -EINVAL;
2489
2490 #ifdef CONFIG_XPS
2491         netif_reset_xps_queues(dev, offset, count);
2492 #endif
2493         dev->tc_to_txq[tc].count = count;
2494         dev->tc_to_txq[tc].offset = offset;
2495         return 0;
2496 }
2497 EXPORT_SYMBOL(netdev_set_tc_queue);
2498
2499 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2500 {
2501         if (num_tc > TC_MAX_QUEUE)
2502                 return -EINVAL;
2503
2504 #ifdef CONFIG_XPS
2505         netif_reset_xps_queues_gt(dev, 0);
2506 #endif
2507         netdev_unbind_all_sb_channels(dev);
2508
2509         dev->num_tc = num_tc;
2510         return 0;
2511 }
2512 EXPORT_SYMBOL(netdev_set_num_tc);
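
/* Illustrative sketch (not part of this file): a driver handling an
 * mqprio-style configuration might declare two traffic classes and carve
 * its Tx queues between them; the queue counts and offsets are assumptions.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0 -> queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1 -> queues 4-7
 */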
2513
2514 void netdev_unbind_sb_channel(struct net_device *dev,
2515                               struct net_device *sb_dev)
2516 {
2517         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2518
2519 #ifdef CONFIG_XPS
2520         netif_reset_xps_queues_gt(sb_dev, 0);
2521 #endif
2522         memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2523         memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2524
2525         while (txq-- != &dev->_tx[0]) {
2526                 if (txq->sb_dev == sb_dev)
2527                         txq->sb_dev = NULL;
2528         }
2529 }
2530 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2531
2532 int netdev_bind_sb_channel_queue(struct net_device *dev,
2533                                  struct net_device *sb_dev,
2534                                  u8 tc, u16 count, u16 offset)
2535 {
2536         /* Make certain the sb_dev and dev are already configured */
2537         if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2538                 return -EINVAL;
2539
2540         /* We cannot hand out queues we don't have */
2541         if ((offset + count) > dev->real_num_tx_queues)
2542                 return -EINVAL;
2543
2544         /* Record the mapping */
2545         sb_dev->tc_to_txq[tc].count = count;
2546         sb_dev->tc_to_txq[tc].offset = offset;
2547
2548         /* Provide a way for Tx queue to find the tc_to_txq map or
2549          * XPS map for itself.
2550          */
2551         while (count--)
2552                 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2553
2554         return 0;
2555 }
2556 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2557
2558 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2559 {
2560         /* Do not use a multiqueue device to represent a subordinate channel */
2561         if (netif_is_multiqueue(dev))
2562                 return -ENODEV;
2563
2564         /* We allow channels 1 - 32767 to be used for subordinate channels.
2565          * Channel 0 is meant to be "native" mode and used only to represent
2566          * the main root device. We allow writing 0 to reset the device back
2567          * to normal mode after being used as a subordinate channel.
2568          */
2569         if (channel > S16_MAX)
2570                 return -EINVAL;
2571
2572         dev->num_tc = -channel;
2573
2574         return 0;
2575 }
2576 EXPORT_SYMBOL(netdev_set_sb_channel);
2577
2578 /*
2579  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2580  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2581  */
2582 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2583 {
2584         bool disabling;
2585         int rc;
2586
2587         disabling = txq < dev->real_num_tx_queues;
2588
2589         if (txq < 1 || txq > dev->num_tx_queues)
2590                 return -EINVAL;
2591
2592         if (dev->reg_state == NETREG_REGISTERED ||
2593             dev->reg_state == NETREG_UNREGISTERING) {
2594                 ASSERT_RTNL();
2595
2596                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2597                                                   txq);
2598                 if (rc)
2599                         return rc;
2600
2601                 if (dev->num_tc)
2602                         netif_setup_tc(dev, txq);
2603
2604                 dev->real_num_tx_queues = txq;
2605
2606                 if (disabling) {
2607                         synchronize_net();
2608                         qdisc_reset_all_tx_gt(dev, txq);
2609 #ifdef CONFIG_XPS
2610                         netif_reset_xps_queues_gt(dev, txq);
2611 #endif
2612                 }
2613         } else {
2614                 dev->real_num_tx_queues = txq;
2615         }
2616
2617         return 0;
2618 }
2619 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2620
2621 #ifdef CONFIG_SYSFS
2622 /**
2623  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2624  *      @dev: Network device
2625  *      @rxq: Actual number of RX queues
2626  *
2627  *      This must be called either with the rtnl_lock held or before
2628  *      registration of the net device.  Returns 0 on success, or a
2629  *      negative error code.  If called before registration, it always
2630  *      succeeds.
2631  */
2632 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2633 {
2634         int rc;
2635
2636         if (rxq < 1 || rxq > dev->num_rx_queues)
2637                 return -EINVAL;
2638
2639         if (dev->reg_state == NETREG_REGISTERED) {
2640                 ASSERT_RTNL();
2641
2642                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2643                                                   rxq);
2644                 if (rc)
2645                         return rc;
2646         }
2647
2648         dev->real_num_rx_queues = rxq;
2649         return 0;
2650 }
2651 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2652 #endif
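
/* Illustrative sketch (not part of this file): when the active channel count
 * changes (for instance in an assumed ethtool set_channels handler running
 * under RTNL), a driver would resize both queue sets:
 *
 *	err = netif_set_real_num_tx_queues(dev, new_txq);
 *	if (err)
 *		return err;
 *	return netif_set_real_num_rx_queues(dev, new_rxq);
 */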
2653
2654 /**
2655  * netif_get_num_default_rss_queues - default number of RSS queues
2656  *
2657  * This routine should set an upper limit on the number of RSS queues
2658  * used by default by multiqueue devices.
2659  */
2660 int netif_get_num_default_rss_queues(void)
2661 {
2662         return is_kdump_kernel() ?
2663                 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2664 }
2665 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
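
/* Illustrative sketch (not part of this file): a multiqueue driver probing a
 * hypothetical NIC might cap its queue count with this helper before
 * allocating the netdev; "hw_max_queues" and "example_priv" are assumptions.
 *
 *	nqueues = min_t(unsigned int, hw_max_queues,
 *			netif_get_num_default_rss_queues());
 *	dev = alloc_etherdev_mq(sizeof(struct example_priv), nqueues);
 */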
2666
2667 static void __netif_reschedule(struct Qdisc *q)
2668 {
2669         struct softnet_data *sd;
2670         unsigned long flags;
2671
2672         local_irq_save(flags);
2673         sd = this_cpu_ptr(&softnet_data);
2674         q->next_sched = NULL;
2675         *sd->output_queue_tailp = q;
2676         sd->output_queue_tailp = &q->next_sched;
2677         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2678         local_irq_restore(flags);
2679 }
2680
2681 void __netif_schedule(struct Qdisc *q)
2682 {
2683         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2684                 __netif_reschedule(q);
2685 }
2686 EXPORT_SYMBOL(__netif_schedule);
2687
2688 struct dev_kfree_skb_cb {
2689         enum skb_free_reason reason;
2690 };
2691
2692 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2693 {
2694         return (struct dev_kfree_skb_cb *)skb->cb;
2695 }
2696
2697 void netif_schedule_queue(struct netdev_queue *txq)
2698 {
2699         rcu_read_lock();
2700         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2701                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2702
2703                 __netif_schedule(q);
2704         }
2705         rcu_read_unlock();
2706 }
2707 EXPORT_SYMBOL(netif_schedule_queue);
2708
2709 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2710 {
2711         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2712                 struct Qdisc *q;
2713
2714                 rcu_read_lock();
2715                 q = rcu_dereference(dev_queue->qdisc);
2716                 __netif_schedule(q);
2717                 rcu_read_unlock();
2718         }
2719 }
2720 EXPORT_SYMBOL(netif_tx_wake_queue);
2721
2722 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2723 {
2724         unsigned long flags;
2725
2726         if (unlikely(!skb))
2727                 return;
2728
2729         if (likely(refcount_read(&skb->users) == 1)) {
2730                 smp_rmb();
2731                 refcount_set(&skb->users, 0);
2732         } else if (likely(!refcount_dec_and_test(&skb->users))) {
2733                 return;
2734         }
2735         get_kfree_skb_cb(skb)->reason = reason;
2736         local_irq_save(flags);
2737         skb->next = __this_cpu_read(softnet_data.completion_queue);
2738         __this_cpu_write(softnet_data.completion_queue, skb);
2739         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2740         local_irq_restore(flags);
2741 }
2742 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2743
2744 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2745 {
2746         if (in_irq() || irqs_disabled())
2747                 __dev_kfree_skb_irq(skb, reason);
2748         else
2749                 dev_kfree_skb(skb);
2750 }
2751 EXPORT_SYMBOL(__dev_kfree_skb_any);
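
/* Illustrative sketch (not part of this file): the dev_consume_skb_any() and
 * dev_kfree_skb_any() wrappers around this helper are what a Tx completion
 * handler would use, since it may run in hard interrupt or process context.
 * "example_*" names are assumptions.
 *
 *	static void example_tx_complete(struct example_priv *priv)
 *	{
 *		struct sk_buff *skb = example_pop_completed_skb(priv);
 *
 *		if (skb)
 *			dev_consume_skb_any(skb);
 *	}
 */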
2752
2753
2754 /**
2755  * netif_device_detach - mark device as removed
2756  * @dev: network device
2757  *
2758  * Mark device as removed from system and therefore no longer available.
2759  */
2760 void netif_device_detach(struct net_device *dev)
2761 {
2762         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2763             netif_running(dev)) {
2764                 netif_tx_stop_all_queues(dev);
2765         }
2766 }
2767 EXPORT_SYMBOL(netif_device_detach);
2768
2769 /**
2770  * netif_device_attach - mark device as attached
2771  * @dev: network device
2772  *
2773  * Mark device as attached to the system and restart it if needed.
2774  */
2775 void netif_device_attach(struct net_device *dev)
2776 {
2777         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2778             netif_running(dev)) {
2779                 netif_tx_wake_all_queues(dev);
2780                 __netdev_watchdog_up(dev);
2781         }
2782 }
2783 EXPORT_SYMBOL(netif_device_attach);
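
/* Illustrative sketch (not part of this file): a hypothetical driver's
 * suspend/resume callbacks pair these helpers so the stack stops queuing
 * packets while the hardware is away.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */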
2784
2785 /*
2786  * Returns a Tx hash based on the given packet descriptor and the number of
2787  * Tx queues to be used as a distribution range.
2788  */
2789 static u16 skb_tx_hash(const struct net_device *dev,
2790                        const struct net_device *sb_dev,
2791                        struct sk_buff *skb)
2792 {
2793         u32 hash;
2794         u16 qoffset = 0;
2795         u16 qcount = dev->real_num_tx_queues;
2796
2797         if (dev->num_tc) {
2798                 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2799
2800                 qoffset = sb_dev->tc_to_txq[tc].offset;
2801                 qcount = sb_dev->tc_to_txq[tc].count;
2802         }
2803
2804         if (skb_rx_queue_recorded(skb)) {
2805                 hash = skb_get_rx_queue(skb);
2806                 while (unlikely(hash >= qcount))
2807                         hash -= qcount;
2808                 return hash + qoffset;
2809         }
2810
2811         return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2812 }
2813
2814 static void skb_warn_bad_offload(const struct sk_buff *skb)
2815 {
2816         static const netdev_features_t null_features;
2817         struct net_device *dev = skb->dev;
2818         const char *name = "";
2819
2820         if (!net_ratelimit())
2821                 return;
2822
2823         if (dev) {
2824                 if (dev->dev.parent)
2825                         name = dev_driver_string(dev->dev.parent);
2826                 else
2827                         name = netdev_name(dev);
2828         }
2829         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2830              "gso_type=%d ip_summed=%d\n",
2831              name, dev ? &dev->features : &null_features,
2832              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2833              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2834              skb_shinfo(skb)->gso_type, skb->ip_summed);
2835 }
2836
2837 /*
2838  * Invalidate hardware checksum when packet is to be mangled, and
2839  * complete checksum manually on outgoing path.
2840  */
2841 int skb_checksum_help(struct sk_buff *skb)
2842 {
2843         __wsum csum;
2844         int ret = 0, offset;
2845
2846         if (skb->ip_summed == CHECKSUM_COMPLETE)
2847                 goto out_set_summed;
2848
2849         if (unlikely(skb_shinfo(skb)->gso_size)) {
2850                 skb_warn_bad_offload(skb);
2851                 return -EINVAL;
2852         }
2853
2854         /* Before computing a checksum, we should make sure no frag could
2855          * be modified by an external entity: the checksum could be wrong.
2856          */
2857         if (skb_has_shared_frag(skb)) {
2858                 ret = __skb_linearize(skb);
2859                 if (ret)
2860                         goto out;
2861         }
2862
2863         offset = skb_checksum_start_offset(skb);
2864         BUG_ON(offset >= skb_headlen(skb));
2865         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2866
2867         offset += skb->csum_offset;
2868         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2869
2870         if (skb_cloned(skb) &&
2871             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2872                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2873                 if (ret)
2874                         goto out;
2875         }
2876
2877         *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2878 out_set_summed:
2879         skb->ip_summed = CHECKSUM_NONE;
2880 out:
2881         return ret;
2882 }
2883 EXPORT_SYMBOL(skb_checksum_help);
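
/* Illustrative sketch (not part of this file): a driver whose hardware cannot
 * checksum a particular packet can fall back to this helper from its
 * ndo_start_xmit() before handing the frame over; "example_hw_can_csum()" is
 * an assumed capability check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 */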
2884
2885 int skb_crc32c_csum_help(struct sk_buff *skb)
2886 {
2887         __le32 crc32c_csum;
2888         int ret = 0, offset, start;
2889
2890         if (skb->ip_summed != CHECKSUM_PARTIAL)
2891                 goto out;
2892
2893         if (unlikely(skb_is_gso(skb)))
2894                 goto out;
2895
2896         /* Before computing a checksum, we should make sure no frag could
2897          * be modified by an external entity: the checksum could be wrong.
2898          */
2899         if (unlikely(skb_has_shared_frag(skb))) {
2900                 ret = __skb_linearize(skb);
2901                 if (ret)
2902                         goto out;
2903         }
2904         start = skb_checksum_start_offset(skb);
2905         offset = start + offsetof(struct sctphdr, checksum);
2906         if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2907                 ret = -EINVAL;
2908                 goto out;
2909         }
2910         if (skb_cloned(skb) &&
2911             !skb_clone_writable(skb, offset + sizeof(__le32))) {
2912                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2913                 if (ret)
2914                         goto out;
2915         }
2916         crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2917                                                   skb->len - start, ~(__u32)0,
2918                                                   crc32c_csum_stub));
2919         *(__le32 *)(skb->data + offset) = crc32c_csum;
2920         skb->ip_summed = CHECKSUM_NONE;
2921         skb->csum_not_inet = 0;
2922 out:
2923         return ret;
2924 }
2925
2926 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2927 {
2928         __be16 type = skb->protocol;
2929
2930         /* Tunnel gso handlers can set protocol to ethernet. */
2931         if (type == htons(ETH_P_TEB)) {
2932                 struct ethhdr *eth;
2933
2934                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2935                         return 0;
2936
2937                 eth = (struct ethhdr *)skb->data;
2938                 type = eth->h_proto;
2939         }
2940
2941         return __vlan_get_protocol(skb, type, depth);
2942 }
2943
2944 /**
2945  *      skb_mac_gso_segment - mac layer segmentation handler.
2946  *      @skb: buffer to segment
2947  *      @features: features for the output path (see dev->features)
2948  */
2949 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2950                                     netdev_features_t features)
2951 {
2952         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2953         struct packet_offload *ptype;
2954         int vlan_depth = skb->mac_len;
2955         __be16 type = skb_network_protocol(skb, &vlan_depth);
2956
2957         if (unlikely(!type))
2958                 return ERR_PTR(-EINVAL);
2959
2960         __skb_pull(skb, vlan_depth);
2961
2962         rcu_read_lock();
2963         list_for_each_entry_rcu(ptype, &offload_base, list) {
2964                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2965                         segs = ptype->callbacks.gso_segment(skb, features);
2966                         break;
2967                 }
2968         }
2969         rcu_read_unlock();
2970
2971         __skb_push(skb, skb->data - skb_mac_header(skb));
2972
2973         return segs;
2974 }
2975 EXPORT_SYMBOL(skb_mac_gso_segment);
2976
2977
2978 /* openvswitch calls this on rx path, so we need a different check.
2979  */
2980 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2981 {
2982         if (tx_path)
2983                 return skb->ip_summed != CHECKSUM_PARTIAL &&
2984                        skb->ip_summed != CHECKSUM_UNNECESSARY;
2985
2986         return skb->ip_summed == CHECKSUM_NONE;
2987 }
2988
2989 /**
2990  *      __skb_gso_segment - Perform segmentation on skb.
2991  *      @skb: buffer to segment
2992  *      @features: features for the output path (see dev->features)
2993  *      @tx_path: whether it is called in TX path
2994  *
2995  *      This function segments the given skb and returns a list of segments.
2996  *
2997  *      It may return NULL if the skb requires no segmentation.  This is
2998  *      only possible when GSO is used for verifying header integrity.
2999  *
3000  *      Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
3001  */
3002 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3003                                   netdev_features_t features, bool tx_path)
3004 {
3005         struct sk_buff *segs;
3006
3007         if (unlikely(skb_needs_check(skb, tx_path))) {
3008                 int err;
3009
3010                 /* We're going to init ->check field in TCP or UDP header */
3011                 err = skb_cow_head(skb, 0);
3012                 if (err < 0)
3013                         return ERR_PTR(err);
3014         }
3015
3016         /* Only report GSO partial support if it will enable us to
3017          * support segmentation on this frame without needing additional
3018          * work.
3019          */
3020         if (features & NETIF_F_GSO_PARTIAL) {
3021                 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3022                 struct net_device *dev = skb->dev;
3023
3024                 partial_features |= dev->features & dev->gso_partial_features;
3025                 if (!skb_gso_ok(skb, features | partial_features))
3026                         features &= ~NETIF_F_GSO_PARTIAL;
3027         }
3028
3029         BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3030                      sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3031
3032         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3033         SKB_GSO_CB(skb)->encap_level = 0;
3034
3035         skb_reset_mac_header(skb);
3036         skb_reset_mac_len(skb);
3037
3038         segs = skb_mac_gso_segment(skb, features);
3039
3040         if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3041                 skb_warn_bad_offload(skb);
3042
3043         return segs;
3044 }
3045 EXPORT_SYMBOL(__skb_gso_segment);
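
/* Illustrative sketch (not part of this file): Tx-path callers normally use
 * the skb_gso_segment() wrapper (tx_path = true). A path that cannot hand a
 * GSO skb to the hardware might segment it in software roughly like this:
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;	// now a list of already-segmented skbs
 *	}
 */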
3046
3047 /* Take action when hardware reception checksum errors are detected. */
3048 #ifdef CONFIG_BUG
3049 void netdev_rx_csum_fault(struct net_device *dev)
3050 {
3051         if (net_ratelimit()) {
3052                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3053                 dump_stack();
3054         }
3055 }
3056 EXPORT_SYMBOL(netdev_rx_csum_fault);
3057 #endif
3058
3059 /* XXX: check that highmem exists at all on the given machine. */
3060 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3061 {
3062 #ifdef CONFIG_HIGHMEM
3063         int i;
3064
3065         if (!(dev->features & NETIF_F_HIGHDMA)) {
3066                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3067                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3068
3069                         if (PageHighMem(skb_frag_page(frag)))
3070                                 return 1;
3071                 }
3072         }
3073 #endif
3074         return 0;
3075 }
3076
3077 /* For an MPLS offload request, verify that we test the netdev's hardware
3078  * MPLS features instead of its standard features.
3079  */
3080 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3081 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3082                                            netdev_features_t features,
3083                                            __be16 type)
3084 {
3085         if (eth_p_mpls(type))
3086                 features &= skb->dev->mpls_features;
3087
3088         return features;
3089 }
3090 #else
3091 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3092                                            netdev_features_t features,
3093                                            __be16 type)
3094 {
3095         return features;
3096 }
3097 #endif
3098
3099 static netdev_features_t harmonize_features(struct sk_buff *skb,
3100         netdev_features_t features)
3101 {
3102         int tmp;
3103         __be16 type;
3104
3105         type = skb_network_protocol(skb, &tmp);
3106         features = net_mpls_features(skb, features, type);
3107
3108         if (skb->ip_summed != CHECKSUM_NONE &&
3109             !can_checksum_protocol(features, type)) {
3110                 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3111         }
3112         if (illegal_highdma(skb->dev, skb))
3113                 features &= ~NETIF_F_SG;
3114
3115         return features;
3116 }
3117
3118 netdev_features_t passthru_features_check(struct sk_buff *skb,
3119                                           struct net_device *dev,
3120                                           netdev_features_t features)
3121 {
3122         return features;
3123 }
3124 EXPORT_SYMBOL(passthru_features_check);
3125
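/*
 * Illustrative sketch (editorial, not kernel code): a driver whose transmit
 * path imposes no extra per-skb constraints can plug passthru_features_check()
 * into its netdev ops.  example_netdev_ops and example_start_xmit are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit		= example_start_xmit,
	.ndo_features_check	= passthru_features_check,
};
#endif
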
3126 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3127                                              struct net_device *dev,
3128                                              netdev_features_t features)
3129 {
3130         return vlan_features_check(skb, features);
3131 }
3132
3133 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3134                                             struct net_device *dev,
3135                                             netdev_features_t features)
3136 {
3137         u16 gso_segs = skb_shinfo(skb)->gso_segs;
3138
3139         if (gso_segs > dev->gso_max_segs)
3140                 return features & ~NETIF_F_GSO_MASK;
3141
3142         /* Support for GSO partial features requires software
3143          * intervention before we can actually process the packets,
3144          * so we need to strip support for any partial features now
3145          * and pull them back in after we have partially
3146          * segmented the frame.
3147          */
3148         if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3149                 features &= ~dev->gso_partial_features;
3150
3151         /* Make sure to clear the IPv4 ID mangling feature if the
3152          * IPv4 header has the potential to be fragmented.
3153          */
3154         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3155                 struct iphdr *iph = skb->encapsulation ?
3156                                     inner_ip_hdr(skb) : ip_hdr(skb);
3157
3158                 if (!(iph->frag_off & htons(IP_DF)))
3159                         features &= ~NETIF_F_TSO_MANGLEID;
3160         }
3161
3162         return features;
3163 }
3164
3165 netdev_features_t netif_skb_features(struct sk_buff *skb)
3166 {
3167         struct net_device *dev = skb->dev;
3168         netdev_features_t features = dev->features;
3169
3170         if (skb_is_gso(skb))
3171                 features = gso_features_check(skb, dev, features);
3172
3173         /* For an encapsulation offload request, verify that we test
3174          * the netdev's hardware encapsulation features instead of
3175          * its standard features.
3176          */
3177         if (skb->encapsulation)
3178                 features &= dev->hw_enc_features;
3179
3180         if (skb_vlan_tagged(skb))
3181                 features = netdev_intersect_features(features,
3182                                                      dev->vlan_features |
3183                                                      NETIF_F_HW_VLAN_CTAG_TX |
3184                                                      NETIF_F_HW_VLAN_STAG_TX);
3185
3186         if (dev->netdev_ops->ndo_features_check)
3187                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3188                                                                 features);
3189         else
3190                 features &= dflt_features_check(skb, dev, features);
3191
3192         return harmonize_features(skb, features);
3193 }
3194 EXPORT_SYMBOL(netif_skb_features);
3195
3196 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3197                     struct netdev_queue *txq, bool more)
3198 {
3199         unsigned int len;
3200         int rc;
3201
3202         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
3203                 dev_queue_xmit_nit(skb, dev);
3204
3205         len = skb->len;
3206         trace_net_dev_start_xmit(skb, dev);
3207         rc = netdev_start_xmit(skb, dev, txq, more);
3208         trace_net_dev_xmit(skb, rc, dev, len);
3209
3210         return rc;
3211 }
3212
3213 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3214                                     struct netdev_queue *txq, int *ret)
3215 {
3216         struct sk_buff *skb = first;
3217         int rc = NETDEV_TX_OK;
3218
3219         while (skb) {
3220                 struct sk_buff *next = skb->next;
3221
3222                 skb->next = NULL;
3223                 rc = xmit_one(skb, dev, txq, next != NULL);
3224                 if (unlikely(!dev_xmit_complete(rc))) {
3225                         skb->next = next;
3226                         goto out;
3227                 }
3228
3229                 skb = next;
3230                 if (netif_xmit_stopped(txq) && skb) {
3231                         rc = NETDEV_TX_BUSY;
3232                         break;
3233                 }
3234         }
3235
3236 out:
3237         *ret = rc;
3238         return skb;
3239 }
3240
3241 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3242                                           netdev_features_t features)
3243 {
3244         if (skb_vlan_tag_present(skb) &&
3245             !vlan_hw_offload_capable(features, skb->vlan_proto))
3246                 skb = __vlan_hwaccel_push_inside(skb);
3247         return skb;
3248 }
3249
3250 int skb_csum_hwoffload_help(struct sk_buff *skb,
3251                             const netdev_features_t features)
3252 {
3253         if (unlikely(skb->csum_not_inet))
3254                 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3255                         skb_crc32c_csum_help(skb);
3256
3257         return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3258 }
3259 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3260
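/*
 * Illustrative sketch (editorial, not kernel code): resolving a
 * CHECKSUM_PARTIAL skb before handing it to hardware that may lack the
 * required offload, mirroring what validate_xmit_skb() does below.
 * example_tx_csum() is a hypothetical helper.
 */
#if 0	/* example only, never compiled */
static int example_tx_csum(struct sk_buff *skb, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;		/* nothing to resolve */

	/* Falls back to skb_checksum_help() or skb_crc32c_csum_help() when
	 * the device cannot offload this protocol's checksum.
	 */
	return skb_csum_hwoffload_help(skb, features);
}
#endif
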
3261 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3262 {
3263         netdev_features_t features;
3264
3265         features = netif_skb_features(skb);
3266         skb = validate_xmit_vlan(skb, features);
3267         if (unlikely(!skb))
3268                 goto out_null;
3269
3270         skb = sk_validate_xmit_skb(skb, dev);
3271         if (unlikely(!skb))
3272                 goto out_null;
3273
3274         if (netif_needs_gso(skb, features)) {
3275                 struct sk_buff *segs;
3276
3277                 segs = skb_gso_segment(skb, features);
3278                 if (IS_ERR(segs)) {
3279                         goto out_kfree_skb;
3280                 } else if (segs) {
3281                         consume_skb(skb);
3282                         skb = segs;
3283                 }
3284         } else {
3285                 if (skb_needs_linearize(skb, features) &&
3286                     __skb_linearize(skb))
3287                         goto out_kfree_skb;
3288
3289                 /* If packet is not checksummed and device does not
3290                  * support checksumming for this protocol, complete
3291                  * checksumming here.
3292                  */
3293                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3294                         if (skb->encapsulation)
3295                                 skb_set_inner_transport_header(skb,
3296                                                                skb_checksum_start_offset(skb));
3297                         else
3298                                 skb_set_transport_header(skb,
3299                                                          skb_checksum_start_offset(skb));
3300                         if (skb_csum_hwoffload_help(skb, features))
3301                                 goto out_kfree_skb;
3302                 }
3303         }
3304
3305         skb = validate_xmit_xfrm(skb, features, again);
3306
3307         return skb;
3308
3309 out_kfree_skb:
3310         kfree_skb(skb);
3311 out_null:
3312         atomic_long_inc(&dev->tx_dropped);
3313         return NULL;
3314 }
3315
3316 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3317 {
3318         struct sk_buff *next, *head = NULL, *tail;
3319
3320         for (; skb != NULL; skb = next) {
3321                 next = skb->next;
3322                 skb->next = NULL;
3323
3324                 /* In case the skb won't be segmented, point prev to the skb itself */
3325                 skb->prev = skb;
3326
3327                 skb = validate_xmit_skb(skb, dev, again);
3328                 if (!skb)
3329                         continue;
3330
3331                 if (!head)
3332                         head = skb;
3333                 else
3334                         tail->next = skb;
3335                 /* If skb was segmented, skb->prev points to
3336                  * the last segment. If not, it still contains skb.
3337                  */
3338                 tail = skb->prev;
3339         }
3340         return head;
3341 }
3342 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3343
3344 static void qdisc_pkt_len_init(struct sk_buff *skb)
3345 {
3346         const struct skb_shared_info *shinfo = skb_shinfo(skb);
3347
3348         qdisc_skb_cb(skb)->pkt_len = skb->len;
3349
3350         /* To get a more precise estimate of the bytes sent on the wire,
3351          * we add the header size of all segments to pkt_len.
3352          */
3353         if (shinfo->gso_size)  {
3354                 unsigned int hdr_len;
3355                 u16 gso_segs = shinfo->gso_segs;
3356
3357                 /* mac layer + network layer */
3358                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3359
3360                 /* + transport layer */
3361                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3362                         const struct tcphdr *th;
3363                         struct tcphdr _tcphdr;
3364
3365                         th = skb_header_pointer(skb, skb_transport_offset(skb),
3366                                                 sizeof(_tcphdr), &_tcphdr);
3367                         if (likely(th))
3368                                 hdr_len += __tcp_hdrlen(th);
3369                 } else {
3370                         struct udphdr _udphdr;
3371
3372                         if (skb_header_pointer(skb, skb_transport_offset(skb),
3373                                                sizeof(_udphdr), &_udphdr))
3374                                 hdr_len += sizeof(struct udphdr);
3375                 }
3376
3377                 if (shinfo->gso_type & SKB_GSO_DODGY)
3378                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3379                                                 shinfo->gso_size);
3380
3381                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3382         }
3383 }
3384
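/*
 * Worked example (editorial) of the estimate above: a TSO skb with
 * 14 + 20 + 20 = 54 bytes of Ethernet/IPv4/TCP headers, gso_size = 1448 and
 * gso_segs = 45 has skb->len = 54 + 45 * 1448 = 65214.  qdisc_pkt_len_init()
 * adds (45 - 1) * 54 = 2376 header bytes, so pkt_len = 67590, which equals
 * the 45 * (1448 + 54) bytes that actually hit the wire.
 */
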
3385 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3386                                  struct net_device *dev,
3387                                  struct netdev_queue *txq)
3388 {
3389         spinlock_t *root_lock = qdisc_lock(q);
3390         struct sk_buff *to_free = NULL;
3391         bool contended;
3392         int rc;
3393
3394         qdisc_calculate_pkt_len(skb, q);
3395
3396         if (q->flags & TCQ_F_NOLOCK) {
3397                 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3398                         __qdisc_drop(skb, &to_free);
3399                         rc = NET_XMIT_DROP;
3400                 } else {
3401                         rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3402                         qdisc_run(q);
3403                 }
3404
3405                 if (unlikely(to_free))
3406                         kfree_skb_list(to_free);
3407                 return rc;
3408         }
3409
3410         /*
3411          * Heuristic to force contended enqueues to serialize on a
3412          * separate lock before trying to get qdisc main lock.
3413          * This permits qdisc->running owner to get the lock more
3414          * often and dequeue packets faster.
3415          */
3416         contended = qdisc_is_running(q);
3417         if (unlikely(contended))
3418                 spin_lock(&q->busylock);
3419
3420         spin_lock(root_lock);
3421         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3422                 __qdisc_drop(skb, &to_free);
3423                 rc = NET_XMIT_DROP;
3424         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3425                    qdisc_run_begin(q)) {
3426                 /*
3427                  * This is a work-conserving queue; there are no old skbs
3428                  * waiting to be sent out; and the qdisc is not running -
3429                  * xmit the skb directly.
3430                  */
3431
3432                 qdisc_bstats_update(q, skb);
3433
3434                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3435                         if (unlikely(contended)) {
3436                                 spin_unlock(&q->busylock);
3437                                 contended = false;
3438                         }
3439                         __qdisc_run(q);
3440                 }
3441
3442                 qdisc_run_end(q);
3443                 rc = NET_XMIT_SUCCESS;
3444         } else {
3445                 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3446                 if (qdisc_run_begin(q)) {
3447                         if (unlikely(contended)) {
3448                                 spin_unlock(&q->busylock);
3449                                 contended = false;
3450                         }
3451                         __qdisc_run(q);
3452                         qdisc_run_end(q);
3453                 }
3454         }
3455         spin_unlock(root_lock);
3456         if (unlikely(to_free))
3457                 kfree_skb_list(to_free);
3458         if (unlikely(contended))
3459                 spin_unlock(&q->busylock);
3460         return rc;
3461 }
3462
3463 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3464 static void skb_update_prio(struct sk_buff *skb)
3465 {
3466         const struct netprio_map *map;
3467         const struct sock *sk;
3468         unsigned int prioidx;
3469
3470         if (skb->priority)
3471                 return;
3472         map = rcu_dereference_bh(skb->dev->priomap);
3473         if (!map)
3474                 return;
3475         sk = skb_to_full_sk(skb);
3476         if (!sk)
3477                 return;
3478
3479         prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3480
3481         if (prioidx < map->priomap_len)
3482                 skb->priority = map->priomap[prioidx];
3483 }
3484 #else
3485 #define skb_update_prio(skb)
3486 #endif
3487
3488 DEFINE_PER_CPU(int, xmit_recursion);
3489 EXPORT_SYMBOL(xmit_recursion);
3490
3491 /**
3492  *      dev_loopback_xmit - loop back @skb
3493  *      @net: network namespace this loopback is happening in
3494  *      @sk:  the sock; present so this function can be used as a netfilter okfn
3495  *      @skb: buffer to transmit
3496  */
3497 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3498 {
3499         skb_reset_mac_header(skb);
3500         __skb_pull(skb, skb_network_offset(skb));
3501         skb->pkt_type = PACKET_LOOPBACK;
3502         skb->ip_summed = CHECKSUM_UNNECESSARY;
3503         WARN_ON(!skb_dst(skb));
3504         skb_dst_force(skb);
3505         netif_rx_ni(skb);
3506         return 0;
3507 }
3508 EXPORT_SYMBOL(dev_loopback_xmit);
3509
3510 #ifdef CONFIG_NET_EGRESS
3511 static struct sk_buff *
3512 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3513 {
3514         struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3515         struct tcf_result cl_res;
3516
3517         if (!miniq)
3518                 return skb;
3519
3520         /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3521         mini_qdisc_bstats_cpu_update(miniq, skb);
3522
3523         switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3524         case TC_ACT_OK:
3525         case TC_ACT_RECLASSIFY:
3526                 skb->tc_index = TC_H_MIN(cl_res.classid);
3527                 break;
3528         case TC_ACT_SHOT:
3529                 mini_qdisc_qstats_cpu_drop(miniq);
3530                 *ret = NET_XMIT_DROP;
3531                 kfree_skb(skb);
3532                 return NULL;
3533         case TC_ACT_STOLEN:
3534         case TC_ACT_QUEUED:
3535         case TC_ACT_TRAP:
3536                 *ret = NET_XMIT_SUCCESS;
3537                 consume_skb(skb);
3538                 return NULL;
3539         case TC_ACT_REDIRECT:
3540                 /* No need to push/pop skb's mac_header here on egress! */
3541                 skb_do_redirect(skb);
3542                 *ret = NET_XMIT_SUCCESS;
3543                 return NULL;
3544         default:
3545                 break;
3546         }
3547
3548         return skb;
3549 }
3550 #endif /* CONFIG_NET_EGRESS */
3551
3552 #ifdef CONFIG_XPS
3553 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3554                                struct xps_dev_maps *dev_maps, unsigned int tci)
3555 {
3556         struct xps_map *map;
3557         int queue_index = -1;
3558
3559         if (dev->num_tc) {
3560                 tci *= dev->num_tc;
3561                 tci += netdev_get_prio_tc_map(dev, skb->priority);
3562         }
3563
3564         map = rcu_dereference(dev_maps->attr_map[tci]);
3565         if (map) {
3566                 if (map->len == 1)
3567                         queue_index = map->queues[0];
3568                 else
3569                         queue_index = map->queues[reciprocal_scale(
3570                                                 skb_get_hash(skb), map->len)];
3571                 if (unlikely(queue_index >= dev->real_num_tx_queues))
3572                         queue_index = -1;
3573         }
3574         return queue_index;
3575 }
3576 #endif
3577
3578 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3579                          struct sk_buff *skb)
3580 {
3581 #ifdef CONFIG_XPS
3582         struct xps_dev_maps *dev_maps;
3583         struct sock *sk = skb->sk;
3584         int queue_index = -1;
3585
3586         if (!static_key_false(&xps_needed))
3587                 return -1;
3588
3589         rcu_read_lock();
3590         if (!static_key_false(&xps_rxqs_needed))
3591                 goto get_cpus_map;
3592
3593         dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3594         if (dev_maps) {
3595                 int tci = sk_rx_queue_get(sk);
3596
3597                 if (tci >= 0 && tci < dev->num_rx_queues)
3598                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3599                                                           tci);
3600         }
3601
3602 get_cpus_map:
3603         if (queue_index < 0) {
3604                 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3605                 if (dev_maps) {
3606                         unsigned int tci = skb->sender_cpu - 1;
3607
3608                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3609                                                           tci);
3610                 }
3611         }
3612         rcu_read_unlock();
3613
3614         return queue_index;
3615 #else
3616         return -1;
3617 #endif
3618 }
3619
3620 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3621                      struct net_device *sb_dev,
3622                      select_queue_fallback_t fallback)
3623 {
3624         return 0;
3625 }
3626 EXPORT_SYMBOL(dev_pick_tx_zero);
3627
3628 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3629                        struct net_device *sb_dev,
3630                        select_queue_fallback_t fallback)
3631 {
3632         return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3633 }
3634 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3635
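/*
 * Illustrative sketch (editorial, not kernel code): a multiqueue driver that
 * wants simple per-CPU queue selection can plug one of the helpers above into
 * its netdev ops.  example_mq_netdev_ops and example_start_xmit are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct net_device_ops example_mq_netdev_ops = {
	.ndo_start_xmit		= example_start_xmit,
	.ndo_select_queue	= dev_pick_tx_cpu_id,
};
#endif
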
3636 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3637                             struct net_device *sb_dev)
3638 {
3639         struct sock *sk = skb->sk;
3640         int queue_index = sk_tx_queue_get(sk);
3641
3642         sb_dev = sb_dev ? : dev;
3643
3644         if (queue_index < 0 || skb->ooo_okay ||
3645             queue_index >= dev->real_num_tx_queues) {
3646                 int new_index = get_xps_queue(dev, sb_dev, skb);
3647
3648                 if (new_index < 0)
3649                         new_index = skb_tx_hash(dev, sb_dev, skb);
3650
3651                 if (queue_index != new_index && sk &&
3652                     sk_fullsock(sk) &&
3653                     rcu_access_pointer(sk->sk_dst_cache))
3654                         sk_tx_queue_set(sk, new_index);
3655
3656                 queue_index = new_index;
3657         }
3658
3659         return queue_index;
3660 }
3661
3662 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3663                                     struct sk_buff *skb,
3664                                     struct net_device *sb_dev)
3665 {
3666         int queue_index = 0;
3667
3668 #ifdef CONFIG_XPS
3669         u32 sender_cpu = skb->sender_cpu - 1;
3670
3671         if (sender_cpu >= (u32)NR_CPUS)
3672                 skb->sender_cpu = raw_smp_processor_id() + 1;
3673 #endif
3674
3675         if (dev->real_num_tx_queues != 1) {
3676                 const struct net_device_ops *ops = dev->netdev_ops;
3677
3678                 if (ops->ndo_select_queue)
3679                         queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
3680                                                             __netdev_pick_tx);
3681                 else
3682                         queue_index = __netdev_pick_tx(dev, skb, sb_dev);
3683
3684                 queue_index = netdev_cap_txqueue(dev, queue_index);
3685         }
3686
3687         skb_set_queue_mapping(skb, queue_index);
3688         return netdev_get_tx_queue(dev, queue_index);
3689 }
3690
3691 /**
3692  *      __dev_queue_xmit - transmit a buffer
3693  *      @skb: buffer to transmit
3694  *      @sb_dev: subordinate device used for L2 forwarding offload
3695  *
3696  *      Queue a buffer for transmission to a network device. The caller must
3697  *      have set the device and priority and built the buffer before calling
3698  *      this function. The function can be called from an interrupt.
3699  *
3700  *      A negative errno code is returned on a failure. A success does not
3701  *      guarantee the frame will be transmitted as it may be dropped due
3702  *      to congestion or traffic shaping.
3703  *
3704  * -----------------------------------------------------------------------------------
3705  *      Note that this function can also return errors from the queue
3706  *      disciplines, including NET_XMIT_DROP, which is a positive value, so
3707  *      errors can also be positive.
3708  *
3709  *      Regardless of the return value, the skb is consumed, so it is currently
3710  *      difficult to retry a failed send to this function.  (You can bump the
3711  *      refcount before sending to hold a reference for a retry if you are careful.)
3712  *
3713  *      When calling this method, interrupts MUST be enabled.  This is because
3714  *      the BH enable code must have IRQs enabled so that it will not deadlock.
3715  *          --BLG
3716  */
3717 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3718 {
3719         struct net_device *dev = skb->dev;
3720         struct netdev_queue *txq;
3721         struct Qdisc *q;
3722         int rc = -ENOMEM;
3723         bool again = false;
3724
3725         skb_reset_mac_header(skb);
3726
3727         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3728                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3729
3730         /* Disable soft irqs for various locks below. Also
3731          * stops preemption for RCU.
3732          */
3733         rcu_read_lock_bh();
3734
3735         skb_update_prio(skb);
3736
3737         qdisc_pkt_len_init(skb);
3738 #ifdef CONFIG_NET_CLS_ACT
3739         skb->tc_at_ingress = 0;
3740 # ifdef CONFIG_NET_EGRESS
3741         if (static_branch_unlikely(&egress_needed_key)) {
3742                 skb = sch_handle_egress(skb, &rc, dev);
3743                 if (!skb)
3744                         goto out;
3745         }
3746 # endif
3747 #endif
3748         /* If the device/qdisc doesn't need skb->dst, release it right now
3749          * while it's hot in this CPU's cache.
3750          */
3751         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3752                 skb_dst_drop(skb);
3753         else
3754                 skb_dst_force(skb);
3755
3756         txq = netdev_pick_tx(dev, skb, sb_dev);
3757         q = rcu_dereference_bh(txq->qdisc);
3758
3759         trace_net_dev_queue(skb);
3760         if (q->enqueue) {
3761                 rc = __dev_xmit_skb(skb, q, dev, txq);
3762                 goto out;
3763         }
3764
3765         /* The device has no queue. Common case for software devices:
3766          * loopback, all sorts of tunnels...
3767          *
3768          * Really, it is unlikely that netif_tx_lock protection is necessary
3769          * here.  (f.e. loopback and IP tunnels are clean, ignoring statistics
3770          * counters.)
3771          * However, it is possible that they rely on the protection
3772          * provided by us here.
3773          *
3774          * Check this and take the lock; it is not prone to deadlocks.
3775          * Alternatively, the noqueue qdisc could be bypassed, which is even simpler 8)
3776          */
3777         if (dev->flags & IFF_UP) {
3778                 int cpu = smp_processor_id(); /* ok because BHs are off */
3779
3780                 if (txq->xmit_lock_owner != cpu) {
3781                         if (unlikely(__this_cpu_read(xmit_recursion) >
3782                                      XMIT_RECURSION_LIMIT))
3783                                 goto recursion_alert;
3784
3785                         skb = validate_xmit_skb(skb, dev, &again);
3786                         if (!skb)
3787                                 goto out;
3788
3789                         HARD_TX_LOCK(dev, txq, cpu);
3790
3791                         if (!netif_xmit_stopped(txq)) {
3792                                 __this_cpu_inc(xmit_recursion);
3793                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3794                                 __this_cpu_dec(xmit_recursion);
3795                                 if (dev_xmit_complete(rc)) {
3796                                         HARD_TX_UNLOCK(dev, txq);
3797                                         goto out;
3798                                 }
3799                         }
3800                         HARD_TX_UNLOCK(dev, txq);
3801                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3802                                              dev->name);
3803                 } else {
3804                         /* Recursion is detected! It is possible,
3805                          * unfortunately
3806                          */
3807 recursion_alert:
3808                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3809                                              dev->name);
3810                 }
3811         }
3812
3813         rc = -ENETDOWN;
3814         rcu_read_unlock_bh();
3815
3816         atomic_long_inc(&dev->tx_dropped);
3817         kfree_skb_list(skb);
3818         return rc;
3819 out:
3820         rcu_read_unlock_bh();
3821         return rc;
3822 }
3823
3824 int dev_queue_xmit(struct sk_buff *skb)
3825 {
3826         return __dev_queue_xmit(skb, NULL);
3827 }
3828 EXPORT_SYMBOL(dev_queue_xmit);
3829
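/*
 * Illustrative sketch (editorial, not kernel code): a minimal in-kernel send
 * of a fully built Ethernet frame carrying IPv4.  example_send_frame(), its
 * device and payload are hypothetical; real callers normally build the L2/L3
 * headers themselves.
 */
#if 0	/* example only, never compiled */
static int example_send_frame(struct net_device *dev, const void *frame,
			      unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, frame, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/* May return negative errnos or positive NET_XMIT_* codes. */
	return dev_queue_xmit(skb);
}
#endif
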
3830 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
3831 {
3832         return __dev_queue_xmit(skb, sb_dev);
3833 }
3834 EXPORT_SYMBOL(dev_queue_xmit_accel);
3835
3836 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3837 {
3838         struct net_device *dev = skb->dev;
3839         struct sk_buff *orig_skb = skb;
3840         struct netdev_queue *txq;
3841         int ret = NETDEV_TX_BUSY;
3842         bool again = false;
3843
3844         if (unlikely(!netif_running(dev) ||
3845                      !netif_carrier_ok(dev)))
3846                 goto drop;
3847
3848         skb = validate_xmit_skb_list(skb, dev, &again);
3849         if (skb != orig_skb)
3850                 goto drop;
3851
3852         skb_set_queue_mapping(skb, queue_id);
3853         txq = skb_get_tx_queue(dev, skb);
3854
3855         local_bh_disable();
3856
3857         HARD_TX_LOCK(dev, txq, smp_processor_id());
3858         if (!netif_xmit_frozen_or_drv_stopped(txq))
3859                 ret = netdev_start_xmit(skb, dev, txq, false);
3860         HARD_TX_UNLOCK(dev, txq);
3861
3862         local_bh_enable();
3863
3864         if (!dev_xmit_complete(ret))
3865                 kfree_skb(skb);
3866
3867         return ret;
3868 drop:
3869         atomic_long_inc(&dev->tx_dropped);
3870         kfree_skb_list(skb);
3871         return NET_XMIT_DROP;
3872 }
3873 EXPORT_SYMBOL(dev_direct_xmit);
3874
3875 /*************************************************************************
3876  *                      Receiver routines
3877  *************************************************************************/
3878
3879 int netdev_max_backlog __read_mostly = 1000;
3880 EXPORT_SYMBOL(netdev_max_backlog);
3881
3882 int netdev_tstamp_prequeue __read_mostly = 1;
3883 int netdev_budget __read_mostly = 300;
3884 unsigned int __read_mostly netdev_budget_usecs = 2000;
3885 int weight_p __read_mostly = 64;           /* old backlog weight */
3886 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
3887 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
3888 int dev_rx_weight __read_mostly = 64;
3889 int dev_tx_weight __read_mostly = 64;
3890
3891 /* Called with irq disabled */
3892 static inline void ____napi_schedule(struct softnet_data *sd,
3893                                      struct napi_struct *napi)
3894 {
3895         list_add_tail(&napi->poll_list, &sd->poll_list);
3896         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3897 }
3898
3899 #ifdef CONFIG_RPS
3900
3901 /* One global table that all flow-based protocols share. */
3902 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3903 EXPORT_SYMBOL(rps_sock_flow_table);
3904 u32 rps_cpu_mask __read_mostly;
3905 EXPORT_SYMBOL(rps_cpu_mask);
3906
3907 struct static_key rps_needed __read_mostly;
3908 EXPORT_SYMBOL(rps_needed);
3909 struct static_key rfs_needed __read_mostly;
3910 EXPORT_SYMBOL(rfs_needed);
3911
3912 static struct rps_dev_flow *
3913 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3914             struct rps_dev_flow *rflow, u16 next_cpu)
3915 {
3916         if (next_cpu < nr_cpu_ids) {
3917 #ifdef CONFIG_RFS_ACCEL
3918                 struct netdev_rx_queue *rxqueue;
3919                 struct rps_dev_flow_table *flow_table;
3920                 struct rps_dev_flow *old_rflow;
3921                 u32 flow_id;
3922                 u16 rxq_index;
3923                 int rc;
3924
3925                 /* Should we steer this flow to a different hardware queue? */
3926                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3927                     !(dev->features & NETIF_F_NTUPLE))
3928                         goto out;
3929                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3930                 if (rxq_index == skb_get_rx_queue(skb))
3931                         goto out;
3932
3933                 rxqueue = dev->_rx + rxq_index;
3934                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3935                 if (!flow_table)
3936                         goto out;
3937                 flow_id = skb_get_hash(skb) & flow_table->mask;
3938                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3939                                                         rxq_index, flow_id);
3940                 if (rc < 0)
3941                         goto out;
3942                 old_rflow = rflow;
3943                 rflow = &flow_table->flows[flow_id];
3944                 rflow->filter = rc;
3945                 if (old_rflow->filter == rflow->filter)
3946                         old_rflow->filter = RPS_NO_FILTER;
3947         out:
3948 #endif
3949                 rflow->last_qtail =
3950                         per_cpu(softnet_data, next_cpu).input_queue_head;
3951         }
3952
3953         rflow->cpu = next_cpu;
3954         return rflow;
3955 }
3956
3957 /*
3958  * get_rps_cpu is called from netif_receive_skb and returns the target
3959  * CPU from the RPS map of the receiving queue for a given skb.
3960  * rcu_read_lock must be held on entry.
3961  */
3962 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3963                        struct rps_dev_flow **rflowp)
3964 {
3965         const struct rps_sock_flow_table *sock_flow_table;
3966         struct netdev_rx_queue *rxqueue = dev->_rx;
3967         struct rps_dev_flow_table *flow_table;
3968         struct rps_map *map;
3969         int cpu = -1;
3970         u32 tcpu;
3971         u32 hash;
3972
3973         if (skb_rx_queue_recorded(skb)) {
3974                 u16 index = skb_get_rx_queue(skb);
3975
3976                 if (unlikely(index >= dev->real_num_rx_queues)) {
3977                         WARN_ONCE(dev->real_num_rx_queues > 1,
3978                                   "%s received packet on queue %u, but number "
3979                                   "of RX queues is %u\n",
3980                                   dev->name, index, dev->real_num_rx_queues);
3981                         goto done;
3982                 }
3983                 rxqueue += index;
3984         }
3985
3986         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3987
3988         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3989         map = rcu_dereference(rxqueue->rps_map);
3990         if (!flow_table && !map)
3991                 goto done;
3992
3993         skb_reset_network_header(skb);
3994         hash = skb_get_hash(skb);
3995         if (!hash)
3996                 goto done;
3997
3998         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3999         if (flow_table && sock_flow_table) {
4000                 struct rps_dev_flow *rflow;
4001                 u32 next_cpu;
4002                 u32 ident;
4003
4004                 /* First check into global flow table if there is a match */
4005                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4006                 if ((ident ^ hash) & ~rps_cpu_mask)
4007                         goto try_rps;
4008
4009                 next_cpu = ident & rps_cpu_mask;
4010
4011                 /* OK, now we know there is a match,
4012                  * we can look at the local (per receive queue) flow table
4013                  */
4014                 rflow = &flow_table->flows[hash & flow_table->mask];
4015                 tcpu = rflow->cpu;
4016
4017                 /*
4018                  * If the desired CPU (where last recvmsg was done) is
4019                  * different from current CPU (one in the rx-queue flow
4020                  * table entry), switch if one of the following holds:
4021                  *   - Current CPU is unset (>= nr_cpu_ids).
4022                  *   - Current CPU is offline.
4023                  *   - The current CPU's queue tail has advanced beyond the
4024                  *     last packet that was enqueued using this table entry.
4025                  *     This guarantees that all previous packets for the flow
4026                  *     have been dequeued, thus preserving in order delivery.
4027                  */
4028                 if (unlikely(tcpu != next_cpu) &&
4029                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4030                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4031                       rflow->last_qtail)) >= 0)) {
4032                         tcpu = next_cpu;
4033                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4034                 }
4035
4036                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4037                         *rflowp = rflow;
4038                         cpu = tcpu;
4039                         goto done;
4040                 }
4041         }
4042
4043 try_rps:
4044
4045         if (map) {
4046                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4047                 if (cpu_online(tcpu)) {
4048                         cpu = tcpu;
4049                         goto done;
4050                 }
4051         }
4052
4053 done:
4054         return cpu;
4055 }
4056
4057 #ifdef CONFIG_RFS_ACCEL
4058
4059 /**
4060  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4061  * @dev: Device on which the filter was set
4062  * @rxq_index: RX queue index
4063  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4064  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4065  *
4066  * Drivers that implement ndo_rx_flow_steer() should periodically call
4067  * this function for each installed filter and remove the filters for
4068  * which it returns %true.
4069  */
4070 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4071                          u32 flow_id, u16 filter_id)
4072 {
4073         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4074         struct rps_dev_flow_table *flow_table;
4075         struct rps_dev_flow *rflow;
4076         bool expire = true;
4077         unsigned int cpu;
4078
4079         rcu_read_lock();
4080         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4081         if (flow_table && flow_id <= flow_table->mask) {
4082                 rflow = &flow_table->flows[flow_id];
4083                 cpu = READ_ONCE(rflow->cpu);
4084                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4085                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4086                            rflow->last_qtail) <
4087                      (int)(10 * flow_table->mask)))
4088                         expire = false;
4089         }
4090         rcu_read_unlock();
4091         return expire;
4092 }
4093 EXPORT_SYMBOL(rps_may_expire_flow);
4094
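/*
 * Illustrative sketch (editorial, not kernel code) of the expiry scan
 * described above: a driver periodically walks its installed ARFS filters and
 * removes those that rps_may_expire_flow() says are no longer needed.  The
 * example_filter structure, its list and example_remove_hw_filter() are
 * hypothetical driver state.
 */
#if 0	/* example only, never compiled */
static void example_arfs_expire(struct net_device *dev,
				struct list_head *filters)
{
	struct example_filter *f, *tmp;

	list_for_each_entry_safe(f, tmp, filters, list) {
		if (rps_may_expire_flow(dev, f->rxq_index, f->flow_id,
					f->filter_id)) {
			example_remove_hw_filter(f);
			list_del(&f->list);
			kfree(f);
		}
	}
}
#endif
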
4095 #endif /* CONFIG_RFS_ACCEL */
4096
4097 /* Called from hardirq (IPI) context */
4098 static void rps_trigger_softirq(void *data)
4099 {
4100         struct softnet_data *sd = data;
4101
4102         ____napi_schedule(sd, &sd->backlog);
4103         sd->received_rps++;
4104 }
4105
4106 #endif /* CONFIG_RPS */
4107
4108 /*
4109  * Check if this softnet_data structure belongs to another CPU.
4110  * If so, queue it on our IPI list and return 1;
4111  * otherwise return 0.
4112  */
4113 static int rps_ipi_queued(struct softnet_data *sd)
4114 {
4115 #ifdef CONFIG_RPS
4116         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4117
4118         if (sd != mysd) {
4119                 sd->rps_ipi_next = mysd->rps_ipi_list;
4120                 mysd->rps_ipi_list = sd;
4121
4122                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4123                 return 1;
4124         }
4125 #endif /* CONFIG_RPS */
4126         return 0;
4127 }
4128
4129 #ifdef CONFIG_NET_FLOW_LIMIT
4130 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4131 #endif
4132
4133 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4134 {
4135 #ifdef CONFIG_NET_FLOW_LIMIT
4136         struct sd_flow_limit *fl;
4137         struct softnet_data *sd;
4138         unsigned int old_flow, new_flow;
4139
4140         if (qlen < (netdev_max_backlog >> 1))
4141                 return false;
4142
4143         sd = this_cpu_ptr(&softnet_data);
4144
4145         rcu_read_lock();
4146         fl = rcu_dereference(sd->flow_limit);
4147         if (fl) {
4148                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4149                 old_flow = fl->history[fl->history_head];
4150                 fl->history[fl->history_head] = new_flow;
4151
4152                 fl->history_head++;
4153                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4154
4155                 if (likely(fl->buckets[old_flow]))
4156                         fl->buckets[old_flow]--;
4157
4158                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4159                         fl->count++;
4160                         rcu_read_unlock();
4161                         return true;
4162                 }
4163         }
4164         rcu_read_unlock();
4165 #endif
4166         return false;
4167 }
4168
4169 /*
4170  * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
4171  * queue (may be a remote CPU queue).
4172  */
4173 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4174                               unsigned int *qtail)
4175 {
4176         struct softnet_data *sd;
4177         unsigned long flags;
4178         unsigned int qlen;
4179
4180         sd = &per_cpu(softnet_data, cpu);
4181
4182         local_irq_save(flags);
4183
4184         rps_lock(sd);
4185         if (!netif_running(skb->dev))
4186                 goto drop;
4187         qlen = skb_queue_len(&sd->input_pkt_queue);
4188         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4189                 if (qlen) {
4190 enqueue:
4191                         __skb_queue_tail(&sd->input_pkt_queue, skb);
4192                         input_queue_tail_incr_save(sd, qtail);
4193                         rps_unlock(sd);
4194                         local_irq_restore(flags);
4195                         return NET_RX_SUCCESS;
4196                 }
4197
4198                 /* Schedule NAPI for the backlog device.
4199                  * We can use a non-atomic operation since we own the queue lock.
4200                  */
4201                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4202                         if (!rps_ipi_queued(sd))
4203                                 ____napi_schedule(sd, &sd->backlog);
4204                 }
4205                 goto enqueue;
4206         }
4207
4208 drop:
4209         sd->dropped++;
4210         rps_unlock(sd);
4211
4212         local_irq_restore(flags);
4213
4214         atomic_long_inc(&skb->dev->rx_dropped);
4215         kfree_skb(skb);
4216         return NET_RX_DROP;
4217 }
4218
4219 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4220 {
4221         struct net_device *dev = skb->dev;
4222         struct netdev_rx_queue *rxqueue;
4223
4224         rxqueue = dev->_rx;
4225
4226         if (skb_rx_queue_recorded(skb)) {
4227                 u16 index = skb_get_rx_queue(skb);
4228
4229                 if (unlikely(index >= dev->real_num_rx_queues)) {
4230                         WARN_ONCE(dev->real_num_rx_queues > 1,
4231                                   "%s received packet on queue %u, but number "
4232                                   "of RX queues is %u\n",
4233                                   dev->name, index, dev->real_num_rx_queues);
4234
4235                         return rxqueue; /* Return first rxqueue */
4236                 }
4237                 rxqueue += index;
4238         }
4239         return rxqueue;
4240 }
4241
4242 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4243                                      struct xdp_buff *xdp,
4244                                      struct bpf_prog *xdp_prog)
4245 {
4246         struct netdev_rx_queue *rxqueue;
4247         void *orig_data, *orig_data_end;
4248         u32 metalen, act = XDP_DROP;
4249         int hlen, off;
4250         u32 mac_len;
4251
4252         /* Reinjected packets coming from act_mirred or similar should
4253          * not get XDP generic processing.
4254          */
4255         if (skb_cloned(skb) || skb_is_tc_redirected(skb))
4256                 return XDP_PASS;
4257
4258         /* XDP packets must be linear and must have sufficient headroom
4259          * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4260          * native XDP provides, thus we need to do it here as well.
4261          */
4262         if (skb_is_nonlinear(skb) ||
4263             skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4264                 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4265                 int troom = skb->tail + skb->data_len - skb->end;
4266
4267                 /* If we have to expand the headroom and also linearize,
4268                  * let's do the pskb_expand_head() work just once here.
4269                  */
4270                 if (pskb_expand_head(skb,
4271                                      hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4272                                      troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4273                         goto do_drop;
4274                 if (skb_linearize(skb))
4275                         goto do_drop;
4276         }
4277
4278         /* The XDP program wants to see the packet starting at the MAC
4279          * header.
4280          */
4281         mac_len = skb->data - skb_mac_header(skb);
4282         hlen = skb_headlen(skb) + mac_len;
4283         xdp->data = skb->data - mac_len;
4284         xdp->data_meta = xdp->data;
4285         xdp->data_end = xdp->data + hlen;
4286         xdp->data_hard_start = skb->data - skb_headroom(skb);
4287         orig_data_end = xdp->data_end;
4288         orig_data = xdp->data;
4289
4290         rxqueue = netif_get_rxqueue(skb);
4291         xdp->rxq = &rxqueue->xdp_rxq;
4292
4293         act = bpf_prog_run_xdp(xdp_prog, xdp);
4294
4295         off = xdp->data - orig_data;
4296         if (off > 0)
4297                 __skb_pull(skb, off);
4298         else if (off < 0)
4299                 __skb_push(skb, -off);
4300         skb->mac_header += off;
4301
4302         /* Check if bpf_xdp_adjust_tail() was used; it can only "shrink"
4303          * the packet.
4304          */
4305         off = orig_data_end - xdp->data_end;
4306         if (off != 0) {
4307                 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4308                 skb->len -= off;
4309
4310         }
4311
4312         switch (act) {
4313         case XDP_REDIRECT:
4314         case XDP_TX:
4315                 __skb_push(skb, mac_len);
4316                 break;
4317         case XDP_PASS:
4318                 metalen = xdp->data - xdp->data_meta;
4319                 if (metalen)
4320                         skb_metadata_set(skb, metalen);
4321                 break;
4322         default:
4323                 bpf_warn_invalid_xdp_action(act);
4324                 /* fall through */
4325         case XDP_ABORTED:
4326                 trace_xdp_exception(skb->dev, xdp_prog, act);
4327                 /* fall through */
4328         case XDP_DROP:
4329         do_drop:
4330                 kfree_skb(skb);
4331                 break;
4332         }
4333
4334         return act;
4335 }
4336
4337 /* When doing generic XDP we have to bypass the qdisc layer and the
4338  * network taps in order to match in-driver-XDP behavior.
4339  */
4340 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4341 {
4342         struct net_device *dev = skb->dev;
4343         struct netdev_queue *txq;
4344         bool free_skb = true;
4345         int cpu, rc;
4346
4347         txq = netdev_pick_tx(dev, skb, NULL);
4348         cpu = smp_processor_id();
4349         HARD_TX_LOCK(dev, txq, cpu);
4350         if (!netif_xmit_stopped(txq)) {
4351                 rc = netdev_start_xmit(skb, dev, txq, 0);
4352                 if (dev_xmit_complete(rc))
4353                         free_skb = false;
4354         }
4355         HARD_TX_UNLOCK(dev, txq);
4356         if (free_skb) {
4357                 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4358                 kfree_skb(skb);
4359         }
4360 }
4361 EXPORT_SYMBOL_GPL(generic_xdp_tx);
4362
4363 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4364
4365 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4366 {
4367         if (xdp_prog) {
4368                 struct xdp_buff xdp;
4369                 u32 act;
4370                 int err;
4371
4372                 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4373                 if (act != XDP_PASS) {
4374                         switch (act) {
4375                         case XDP_REDIRECT:
4376                                 err = xdp_do_generic_redirect(skb->dev, skb,
4377                                                               &xdp, xdp_prog);
4378                                 if (err)
4379                                         goto out_redir;
4380                                 break;
4381                         case XDP_TX:
4382                                 generic_xdp_tx(skb, xdp_prog);
4383                                 break;
4384                         }
4385                         return XDP_DROP;
4386                 }
4387         }
4388         return XDP_PASS;
4389 out_redir:
4390         kfree_skb(skb);
4391         return XDP_DROP;
4392 }
4393 EXPORT_SYMBOL_GPL(do_xdp_generic);
4394
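/*
 * Illustrative sketch (editorial, not kernel code): a driver handing an skb
 * to the generic XDP hook before normal receive processing, similar to what
 * netif_rx_internal() does below.  example_rx() is a hypothetical helper.
 */
#if 0	/* example only, never compiled */
static int example_rx(struct net_device *dev, struct sk_buff *skb)
{
	u32 act;

	preempt_disable();
	rcu_read_lock();
	act = do_xdp_generic(rcu_dereference(dev->xdp_prog), skb);
	rcu_read_unlock();
	preempt_enable();

	if (act != XDP_PASS)
		return NET_RX_SUCCESS;	/* consumed, redirected or dropped by XDP */

	return netif_rx(skb);
}
#endif
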
4395 static int netif_rx_internal(struct sk_buff *skb)
4396 {
4397         int ret;
4398
4399         net_timestamp_check(netdev_tstamp_prequeue, skb);
4400
4401         trace_netif_rx(skb);
4402
4403         if (static_branch_unlikely(&generic_xdp_needed_key)) {
4404                 int ret;
4405
4406                 preempt_disable();
4407                 rcu_read_lock();
4408                 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4409                 rcu_read_unlock();
4410                 preempt_enable();
4411
4412                 /* Consider XDP consuming the packet a success from
4413                  * the netdev's point of view; we do not want to count
4414                  * this as an error.
4415                  */
4416                 if (ret != XDP_PASS)
4417                         return NET_RX_SUCCESS;
4418         }
4419
4420 #ifdef CONFIG_RPS
4421         if (static_key_false(&rps_needed)) {
4422                 struct rps_dev_flow voidflow, *rflow = &voidflow;
4423                 int cpu;
4424
4425                 preempt_disable();
4426                 rcu_read_lock();
4427
4428                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4429                 if (cpu < 0)
4430                         cpu = smp_processor_id();
4431
4432                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4433
4434                 rcu_read_unlock();
4435                 preempt_enable();
4436         } else
4437 #endif
4438         {
4439                 unsigned int qtail;
4440
4441                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4442                 put_cpu();
4443         }
4444         return ret;
4445 }
4446
4447 /**
4448  *      netif_rx        -       post buffer to the network code
4449  *      @skb: buffer to post
4450  *
4451  *      This function receives a packet from a device driver and queues it for
4452  *      the upper (protocol) levels to process.  It always succeeds. The buffer
4453  *      may be dropped during processing for congestion control or by the
4454  *      protocol layers.
4455  *
4456  *      return values:
4457  *      NET_RX_SUCCESS  (no congestion)
4458  *      NET_RX_DROP     (packet was dropped)
4459  *
4460  */
4461
4462 int netif_rx(struct sk_buff *skb)
4463 {
4464         trace_netif_rx_entry(skb);
4465
4466         return netif_rx_internal(skb);
4467 }
4468 EXPORT_SYMBOL(netif_rx);
4469
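/*
 * Illustrative sketch (editorial, not kernel code): the classic non-NAPI
 * receive path, where a driver builds an skb from a received frame and posts
 * it with netif_rx().  example_isr_rx(), its device and buffer are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_isr_rx(struct net_device *dev, const void *buf,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	skb_put_data(skb, buf, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);		/* NET_RX_SUCCESS or NET_RX_DROP */
}
#endif
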
4470 int netif_rx_ni(struct sk_buff *skb)
4471 {
4472         int err;
4473
4474         trace_netif_rx_ni_entry(skb);
4475
4476         preempt_disable();
4477         err = netif_rx_internal(skb);
4478         if (local_softirq_pending())
4479                 do_softirq();
4480         preempt_enable();
4481
4482         return err;
4483 }
4484 EXPORT_SYMBOL(netif_rx_ni);
4485
4486 static __latent_entropy void net_tx_action(struct softirq_action *h)
4487 {
4488         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4489
4490         if (sd->completion_queue) {
4491                 struct sk_buff *clist;
4492
4493                 local_irq_disable();
4494                 clist = sd->completion_queue;
4495                 sd->completion_queue = NULL;
4496                 local_irq_enable();
4497
4498                 while (clist) {
4499                         struct sk_buff *skb = clist;
4500
4501                         clist = clist->next;
4502
4503                         WARN_ON(refcount_read(&skb->users));
4504                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4505                                 trace_consume_skb(skb);
4506                         else
4507                                 trace_kfree_skb(skb, net_tx_action);
4508
4509                         if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4510                                 __kfree_skb(skb);
4511                         else
4512                                 __kfree_skb_defer(skb);
4513                 }
4514
4515                 __kfree_skb_flush();
4516         }
4517
4518         if (sd->output_queue) {
4519                 struct Qdisc *head;
4520
4521                 local_irq_disable();
4522                 head = sd->output_queue;
4523                 sd->output_queue = NULL;
4524                 sd->output_queue_tailp = &sd->output_queue;
4525                 local_irq_enable();
4526
4527                 while (head) {
4528                         struct Qdisc *q = head;
4529                         spinlock_t *root_lock = NULL;
4530
4531                         head = head->next_sched;
4532
4533                         if (!(q->flags & TCQ_F_NOLOCK)) {
4534                                 root_lock = qdisc_lock(q);
4535                                 spin_lock(root_lock);
4536                         }
4537                         /* We need to make sure head->next_sched is read
4538                          * before clearing __QDISC_STATE_SCHED
4539                          */
4540                         smp_mb__before_atomic();
4541                         clear_bit(__QDISC_STATE_SCHED, &q->state);
4542                         qdisc_run(q);
4543                         if (root_lock)
4544                                 spin_unlock(root_lock);
4545                 }
4546         }
4547
4548         xfrm_dev_backlog(sd);
4549 }
4550
4551 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4552 /* This hook is defined here for ATM LANE */
4553 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4554                              unsigned char *addr) __read_mostly;
4555 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4556 #endif
4557
4558 static inline struct sk_buff *
4559 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4560                    struct net_device *orig_dev)
4561 {
4562 #ifdef CONFIG_NET_CLS_ACT
4563         struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4564         struct tcf_result cl_res;
4565
4566         /* If there's at least one ingress qdisc present somewhere (so
4567          * we got here via the enabled static key), devices that are
4568          * not configured with an ingress qdisc will bail
4569          * out here.
4570          */
4571         if (!miniq)
4572                 return skb;
4573
4574         if (*pt_prev) {
4575                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4576                 *pt_prev = NULL;
4577         }
4578
4579         qdisc_skb_cb(skb)->pkt_len = skb->len;
4580         skb->tc_at_ingress = 1;
4581         mini_qdisc_bstats_cpu_update(miniq, skb);
4582
4583         switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4584         case TC_ACT_OK:
4585         case TC_ACT_RECLASSIFY:
4586                 skb->tc_index = TC_H_MIN(cl_res.classid);
4587                 break;
4588         case TC_ACT_SHOT:
4589                 mini_qdisc_qstats_cpu_drop(miniq);
4590                 kfree_skb(skb);
4591                 return NULL;
4592         case TC_ACT_STOLEN:
4593         case TC_ACT_QUEUED:
4594         case TC_ACT_TRAP:
4595                 consume_skb(skb);
4596                 return NULL;
4597         case TC_ACT_REDIRECT:
4598                 /* skb_mac_header check was done by cls/act_bpf, so
4599                  * we can safely push the L2 header back before
4600                  * redirecting to another netdev
4601                  */
4602                 __skb_push(skb, skb->mac_len);
4603                 skb_do_redirect(skb);
4604                 return NULL;
4605         case TC_ACT_REINSERT:
4606                 /* this does not scrub the packet, and updates stats on error */
4607                 skb_tc_reinsert(skb, &cl_res);
4608                 return NULL;
4609         default:
4610                 break;
4611         }
4612 #endif /* CONFIG_NET_CLS_ACT */
4613         return skb;
4614 }
4615
4616 /**
4617  *      netdev_is_rx_handler_busy - check if receive handler is registered
4618  *      @dev: device to check
4619  *
4620  *      Check if a receive handler is already registered for a given device.
4621  *      Return true if there is one.
4622  *
4623  *      The caller must hold the rtnl_mutex.
4624  */
4625 bool netdev_is_rx_handler_busy(struct net_device *dev)
4626 {
4627         ASSERT_RTNL();
4628         return dev && rtnl_dereference(dev->rx_handler);
4629 }
4630 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4631
4632 /**
4633  *      netdev_rx_handler_register - register receive handler
4634  *      @dev: device to register a handler for
4635  *      @rx_handler: receive handler to register
4636  *      @rx_handler_data: data pointer that is used by rx handler
4637  *
4638  *      Register a receive handler for a device. This handler will then be
4639  *      called from __netif_receive_skb. A negative errno code is returned
4640  *      on a failure.
4641  *
4642  *      The caller must hold the rtnl_mutex.
4643  *
4644  *      For a general description of rx_handler, see enum rx_handler_result.
4645  */
4646 int netdev_rx_handler_register(struct net_device *dev,
4647                                rx_handler_func_t *rx_handler,
4648                                void *rx_handler_data)
4649 {
4650         if (netdev_is_rx_handler_busy(dev))
4651                 return -EBUSY;
4652
4653         if (dev->priv_flags & IFF_NO_RX_HANDLER)
4654                 return -EINVAL;
4655
4656         /* Note: rx_handler_data must be set before rx_handler */
4657         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4658         rcu_assign_pointer(dev->rx_handler, rx_handler);
4659
4660         return 0;
4661 }
4662 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
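
/* Illustrative sketch (editorial addition, not part of dev.c): how an upper
 * device in the bridge/macvlan mould might claim a lower device's receive
 * path.  struct example_port and example_handle_frame() are hypothetical,
 * and the caller is assumed to hold rtnl_lock() already.
 *
 *	static int example_attach_port(struct net_device *lower_dev,
 *				       struct example_port *port)
 *	{
 *		ASSERT_RTNL();
 *
 *		// Optional: netdev_rx_handler_register() performs this
 *		// check itself and returns -EBUSY on a conflict.
 *		if (netdev_is_rx_handler_busy(lower_dev))
 *			return -EBUSY;
 *
 *		return netdev_rx_handler_register(lower_dev,
 *						  example_handle_frame, port);
 *	}
 *
 * The port pointer registered here is what rcu_dereference() of
 * dev->rx_handler_data later returns inside the handler.
 */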
4663
4664 /**
4665  *      netdev_rx_handler_unregister - unregister receive handler
4666  *      @dev: device to unregister a handler from
4667  *
4668  *      Unregister a receive handler from a device.
4669  *
4670  *      The caller must hold the rtnl_mutex.
4671  */
4672 void netdev_rx_handler_unregister(struct net_device *dev)
4673 {
4674
4675         ASSERT_RTNL();
4676         RCU_INIT_POINTER(dev->rx_handler, NULL);
4677         /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
4678          * section is guaranteed to see a non-NULL rx_handler_data
4679          * as well.
4680          */
4681         synchronize_net();
4682         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4683 }
4684 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
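
/* Illustrative sketch (editorial addition, not part of dev.c): the matching
 * rx_handler callback for the hypothetical example_port above, showing how
 * the RX_HANDLER_* return values drive __netif_receive_skb_core().
 *
 *	static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct example_port *port;
 *
 *		port = rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (skb->pkt_type == PACKET_LOOPBACK)
 *			return RX_HANDLER_PASS;		// normal processing
 *
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return RX_HANDLER_CONSUMED;	// nothing left to do
 *		*pskb = skb;
 *
 *		skb->dev = port->upper_dev;	// steer to the upper device
 *		return RX_HANDLER_ANOTHER;	// take another_round above
 *	}
 *
 * Teardown is the mirror image: under rtnl_lock(), call
 * netdev_rx_handler_unregister(lower_dev); as the code above shows, it
 * synchronizes RCU internally before rx_handler_data is cleared.
 */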
4685
4686 /*
4687  * Limit the use of PFMEMALLOC reserves to those protocols that implement
4688  * the special handling of PFMEMALLOC skbs.
4689  */
4690 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4691 {
4692         switch (skb->protocol) {
4693         case htons(ETH_P_ARP):
4694         case htons(ETH_P_IP):
4695         case htons(ETH_P_IPV6):
4696         case htons(ETH_P_8021Q):
4697         case htons(ETH_P_8021AD):
4698                 return true;
4699         default:
4700                 return false;
4701         }
4702 }
4703
4704 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4705                              int *ret, struct net_device *orig_dev)
4706 {
4707 #ifdef CONFIG_NETFILTER_INGRESS
4708         if (nf_hook_ingress_active(skb)) {
4709                 int ingress_retval;
4710
4711                 if (*pt_prev) {
4712                         *ret = deliver_skb(skb, *pt_prev, orig_dev);
4713                         *pt_prev = NULL;
4714                 }
4715
4716                 rcu_read_lock();
4717                 ingress_retval = nf_hook_ingress(skb);
4718                 rcu_read_unlock();
4719                 return ingress_retval;
4720         }
4721 #endif /* CONFIG_NETFILTER_INGRESS */
4722         return 0;
4723 }
4724
4725 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4726                                     struct packet_type **ppt_prev)
4727 {
4728         struct packet_type *ptype, *pt_prev;
4729         rx_handler_func_t *rx_handler;
4730         struct net_device *orig_dev;
4731         bool deliver_exact = false;
4732         int ret = NET_RX_DROP;
4733         __be16 type;
4734
4735         net_timestamp_check(!netdev_tstamp_prequeue, skb);
4736
4737         trace_netif_receive_skb(skb);
4738
4739         orig_dev = skb->dev;
4740
4741         skb_reset_network_header(skb);
4742         if (!skb_transport_header_was_set(skb))
4743                 skb_reset_transport_header(skb);
4744         skb_reset_mac_len(skb);
4745
4746         pt_prev = NULL;
4747
4748 another_round:
4749         skb->skb_iif = skb->dev->ifindex;
4750
4751         __this_cpu_inc(softnet_data.processed);
4752
4753         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4754             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4755                 skb = skb_vlan_untag(skb);
4756                 if (unlikely(!skb))
4757                         goto out;
4758         }
4759
4760         if (skb_skip_tc_classify(skb))
4761                 goto skip_classify;
4762
4763         if (pfmemalloc)
4764                 goto skip_taps;
4765
4766         list_for_each_entry_rcu(ptype, &ptype_all, list) {
4767                 if (pt_prev)
4768                         ret = deliver_skb(skb, pt_prev, orig_dev);
4769                 pt_prev = ptype;
4770         }
4771
4772         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4773                 if (pt_prev)
4774                         ret = deliver_skb(skb, pt_prev, orig_dev);
4775                 pt_prev = ptype;
4776         }
4777
4778 skip_taps:
4779 #ifdef CONFIG_NET_INGRESS
4780         if (static_branch_unlikely(&ingress_needed_key)) {
4781                 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4782                 if (!skb)
4783                         goto out;
4784
4785                 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4786                         goto out;
4787         }
4788 #endif
4789         skb_reset_tc(skb);
4790 skip_classify:
4791         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4792                 goto drop;
4793
4794         if (skb_vlan_tag_present(skb)) {
4795                 if (pt_prev) {
4796                         ret = deliver_skb(skb, pt_prev, orig_dev);
4797                         pt_prev = NULL;
4798                 }
4799                 if (vlan_do_receive(&skb))
4800                         goto another_round;
4801                 else if (unlikely(!skb))
4802                         goto out;
4803         }
4804
4805         rx_handler = rcu_dereference(skb->dev->rx_handler);
4806         if (rx_handler) {
4807                 if (pt_prev) {
4808                         ret = deliver_skb(skb, pt_prev, orig_dev);
4809                         pt_prev = NULL;
4810                 }
4811                 switch (rx_handler(&skb)) {
4812                 case RX_HANDLER_CONSUMED:
4813                         ret = NET_RX_SUCCESS;
4814                         goto out;
4815                 case RX_HANDLER_ANOTHER:
4816                         goto another_round;
4817                 case RX_HANDLER_EXACT:
4818                         deliver_exact = true;
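                        /* fall through */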
4819                 case RX_HANDLER_PASS:
4820                         break;
4821                 default:
4822                         BUG();
4823                 }
4824         }
4825
4826         if (unlikely(skb_vlan_tag_present(skb))) {
4827                 if (skb_vlan_tag_get_id(skb))
4828                         skb->pkt_type = PACKET_OTHERHOST;
4829                 /* Note: we might in the future use prio bits
4830                  * and set skb->priority like in vlan_do_receive().
4831                  * For the time being, just ignore the Priority Code Point.
4832                  */
4833                 skb->vlan_tci = 0;
4834         }
4835
4836         type = skb->protocol;
4837
4838         /* deliver only exact match when indicated */
4839         if (likely(!deliver_exact)) {
4840                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4841                                        &ptype_base[ntohs(type) &
4842                                                    PTYPE_HASH_MASK]);
4843         }
4844
4845         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4846                                &orig_dev->ptype_specific);
4847
4848         if (unlikely(skb->dev != orig_dev)) {
4849                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4850                                        &skb->dev->ptype_specific);
4851         }
4852
4853         if (pt_prev) {
4854                 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
4855                         goto drop;
4856                 *ppt_prev = pt_prev;
4857         } else {
4858 drop:
4859                 if (!deliver_exact)
4860                         atomic_long_inc(&skb->dev->rx_dropped);
4861                 else
4862                         atomic_long_inc(&skb->dev->rx_nohandler);
4863                 kfree_skb(skb);
4864                         /* Jamal, now you will not be able to escape explaining
4865                          * to me how you were going to use this. :-)
4866                  */
4867                 ret = NET_RX_DROP;
4868         }
4869
4870 out:
4871         return ret;
4872 }
4873
4874 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
4875 {
4876         struct net_device *orig_dev = skb->dev;
4877         struct packet_type *pt_prev = NULL;
4878         int ret;
4879
4880         ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4881         if (pt_prev)
4882                 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4883         return ret;
4884 }
4885
4886 /**
4887  *      netif_receive_skb_core - special purpose version of netif_receive_skb
4888  *      @skb: buffer to process
4889  *
4890  *      More direct receive version of netif_receive_skb().  It should
4891  *      only be used by callers that have a need to skip RPS and Generic XDP.
4892  *      Caller must also take care of handling (page_is_)pfmemalloc pages.
4893  *
4894  *      This function may only be called from softirq context and interrupts
4895  *      should be enabled.
4896  *
4897  *      Return values (usually ignored):
4898  *      NET_RX_SUCCESS: no congestion
4899  *      NET_RX_DROP: packet was dropped
4900  */
4901 int netif_receive_skb_core(struct sk_buff *skb)
4902 {
4903         int ret;
4904
4905         rcu_read_lock();
4906         ret = __netif_receive_skb_one_core(skb, false);
4907         rcu_read_unlock();
4908
4909         return ret;
4910 }
4911 EXPORT_SYMBOL(netif_receive_skb_core);
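
/* Illustrative sketch (editorial addition, not part of dev.c): a hypothetical
 * driver fragment that deliberately bypasses RPS and generic XDP.  It runs
 * from a NAPI poll routine (softirq context) and has already dealt with
 * pfmemalloc pages itself, as the kernel-doc above requires.
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb_core(skb);	// skb ownership passes to the stack
 *
 * Most drivers should prefer napi_gro_receive() or netif_receive_skb().
 */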
4912
4913 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
4914                                                   struct packet_type *pt_prev,
4915                                                   struct net_device *orig_dev)
4916 {
4917         struct sk_buff *skb, *next;
4918
4919         if (!pt_prev)
4920                 return;
4921         if (list_empty(head))
4922                 return;
4923         if (pt_prev->list_func != NULL)
4924                 pt_prev->list_func(head, pt_prev, orig_dev);
4925         else
4926                 list_for_each_entry_safe(skb, next, head, list)
4927                         pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4928 }
4929
4930 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
4931 {
4932         /* Fast-path assumptions:
4933          * - There is no RX handler.
4934          * - Only one packet_type matches.
4935          * If either of these fails, we will end up doing some per-packet
4936          * processing in-line, then handling the 'last ptype' for the whole
4937          * sublist.  This can't cause out-of-order delivery to any single ptype,
4938          * because the 'last ptype' must be constant across the sublist, and all
4939          * other ptypes are handled per-packet.
4940          */
4941         /* Current (common) ptype of sublist */
4942         struct packet_type *pt_curr = NULL;
4943         /* Current (common) orig_dev of sublist */
4944         struct net_device *od_curr = NULL;
4945         struct list_head sublist;
4946         struct sk_buff *skb, *next;
4947
4948         INIT_LIST_HEAD(&sublist);
4949         list_for_each_entry_safe(skb, next, head, list) {
4950                 struct net_device *orig_dev = skb->dev;
4951                 struct packet_type *pt_prev = NULL;
4952
4953                 list_del(&skb->list);
4954                 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4955                 if (!pt_prev)
4956                         continue;
4957                 if (pt_curr != pt_prev || od_curr != orig_dev) {
4958                         /* dispatch old sublist */
4959                         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4960                         /* start new sublist */
4961                         INIT_LIST_HEAD(&sublist);
4962                         pt_curr = pt_prev;
4963                         od_curr = orig_dev;
4964                 }
4965                 list_add_tail(&skb->list, &sublist);
4966         }
4967
4968         /* dispatch final sublist */
4969         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4970 }
4971
4972 static int __netif_receive_skb(struct sk_buff *skb)
4973 {
4974         int ret;
4975
4976         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4977                 unsigned int noreclaim_flag;
4978
4979                 /*
4980                  * PFMEMALLOC skbs are special, they should
4981                  * - be delivered to SOCK_MEMALLOC sockets only
4982                  * - stay away from userspace
4983                  * - have bounded memory usage
4984                  *
4985                  * Use PF_MEMALLOC as this saves us from propagating the allocation
4986                  * context down to all allocation sites.
4987                  */
4988                 noreclaim_flag = memalloc_noreclaim_save();
4989                 ret = __netif_receive_skb_one_core(skb, true);
4990                 memalloc_noreclaim_restore(noreclaim_flag);
4991         } else
4992                 ret = __netif_receive_skb_one_core(skb, false);
4993
4994         return ret;
4995 }
4996
4997 static void __netif_receive_skb_list(struct list_head *head)
4998 {
4999         unsigned long noreclaim_flag = 0;
5000         struct sk_buff *skb, *next;
5001         bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5002
5003         list_for_each_entry_safe(skb, next, head, list) {
5004                 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5005                         struct list_head sublist;
5006
5007                         /* Handle the previous sublist */
5008                         list_cut_before(&sublist, head, &skb->list);
5009                         if (!list_empty(&sublist))
5010                                 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5011                         pfmemalloc = !pfmemalloc;
5012                         /* See comments in __netif_receive_skb */
5013                         if (pfmemalloc)
5014                                 noreclaim_flag = memalloc_noreclaim_save();
5015                         else
5016                                 memalloc_noreclaim_restore(noreclaim_flag);
5017                 }
5018         }
5019         /* Handle the remaining sublist */
5020         if (!list_empty(head))
5021                 __netif_receive_skb_list_core(head, pfmemalloc);
5022         /* Restore pflags */
5023         if (pfmemalloc)
5024                 memalloc_noreclaim_restore(noreclaim_flag);
5025 }
5026
5027 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5028 {
5029         struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5030         struct bpf_prog *new = xdp->prog;
5031         int ret = 0;
5032
5033         switch (xdp->command) {
5034         case XDP_SETUP_PROG:
5035                 rcu_assign_pointer(dev->xdp_prog, new);
5036                 if (old)
5037                         bpf_prog_put(old);
5038
5039                 if (old && !new) {
5040                         static_branch_dec(&generic_xdp_needed_key);
5041                 } else if (new && !old) {
5042                         static_branch_inc(&generic_xdp_needed_key);
5043                         dev_disable_lro(dev);
5044                         dev_disable_gro_hw(dev);
5045                 }
5046                 break;
5047
5048         case XDP_QUERY_PROG:
5049                 xdp->prog_id = old ? old->aux->id : 0;
5050                 break;
5051
5052         default:
5053                 ret = -EINVAL;
5054                 break;
5055         }
5056
5057         return ret;
5058 }
5059
5060 static int netif_receive_skb_internal(struct sk_buff *skb)
5061 {
5062         int ret;
5063
5064         net_timestamp_check(netdev_tstamp_prequeue, skb);
5065
5066         if (skb_defer_rx_timestamp(skb))
5067                 return NET_RX_SUCCESS;
5068
5069         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5070                 int ret;
5071
5072                 preempt_disable();
5073                 rcu_read_lock();
5074                 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5075                 rcu_read_unlock();
5076                 preempt_enable();
5077
5078                 if (ret != XDP_PASS)
5079                         return NET_RX_DROP;
5080         }
5081
5082         rcu_read_lock();
5083 #ifdef CONFIG_RPS
5084         if (static_key_false(&rps_needed)) {
5085                 struct rps_dev_flow voidflow, *rflow = &voidflow;
5086                 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5087
5088                 if (cpu >= 0) {
5089                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5090                         rcu_read_unlock();
5091                         return ret;
5092                 }
5093         }
5094 #endif
5095         ret = __netif_receive_skb(skb);
5096         rcu_read_unlock();
5097         return ret;
5098 }
5099
5100 static void netif_receive_skb_list_internal(struct list_head *head)
5101 {
5102         struct bpf_prog *xdp_prog = NULL;
5103         struct sk_buff *skb, *next;
5104         struct list_head sublist;
5105
5106         INIT_LIST_HEAD(&sublist);
5107         list_for_each_entry_safe(skb, next, head, list) {
5108                 net_timestamp_check(netdev_tstamp_prequeue, skb);
5109                 list_del(&skb->list);
5110                 if (!skb_defer_rx_timestamp(skb))
5111                         list_add_tail(&skb->list, &sublist);
5112         }
5113         list_splice_init(&sublist, head);
5114
5115         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5116                 preempt_disable();
5117                 rcu_read_lock();
5118                 list_for_each_entry_safe(skb, next, head, list) {
5119                         xdp_prog = rcu_dereference(skb->dev->xdp_prog);
5120                         list_del(&skb->list);
5121                         if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
5122                                 list_add_tail(&skb->list, &sublist);
5123                 }
5124                 rcu_read_unlock();
5125                 preempt_enable();
5126                 /* Put passed packets back on main list */
5127                 list_splice_init(&sublist, head);
5128         }
5129
5130         rcu_read_lock();
5131 #ifdef CONFIG_RPS
5132         if (static_key_false(&rps_needed)) {
5133                 list_for_each_entry_safe(skb, next, head, list) {
5134                         struct rps_dev_flow voidflow, *rflow = &voidflow;
5135                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5136
5137                         if (cpu >= 0) {
5138                                 /* Will be handled, remove from list */
5139                                 list_del(&skb->list);
5140                                 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5141                         }
5142                 }
5143         }
5144 #endif
5145         __netif_receive_skb_list(head);
5146         rcu_read_unlock();
5147 }
5148
5149 /**
5150  *      netif_receive_skb - process receive buffer from network
5151  *      @skb: buffer to process
5152  *
5153  *      netif_receive_skb() is the main receive data processing function.
5154  *      It always succeeds. The buffer may be dropped during processing
5155  *      for congestion control or by the protocol layers.
5156  *
5157  *      This function may only be called from softirq context and interrupts
5158  *      should be enabled.
5159  *
5160  *      Return values (usually ignored):
5161  *      NET_RX_SUCCESS: no congestion
5162  *      NET_RX_DROP: packet was dropped
5163  */
5164 int netif_receive_skb(struct sk_buff *skb)
5165 {
5166         trace_netif_receive_skb_entry(skb);
5167
5168         return netif_receive_skb_internal(skb);
5169 }
5170 EXPORT_SYMBOL(netif_receive_skb);
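
/* Illustrative sketch (editorial addition, not part of dev.c): the classic
 * per-packet receive path of a hypothetical driver, running in softirq
 * context with interrupts enabled; skb, frame_len and dev come from the
 * driver's own RX handling.
 *
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);		// return value is usually ignored
 *
 * NAPI drivers normally call napi_gro_receive() instead, so that GRO can
 * coalesce packets before they reach this function.
 */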
5171
5172 /**
5173  *      netif_receive_skb_list - process many receive buffers from network
5174  *      @head: list of skbs to process.
5175  *
5176  *      Since the return value of netif_receive_skb() is normally ignored, and
5177  *      wouldn't be meaningful for a list, this function returns void.
5178  *
5179  *      This function may only be called from softirq context and interrupts
5180  *      should be enabled.
5181  */
5182 void netif_receive_skb_list(struct list_head *head)
5183 {
5184         struct sk_buff *skb;
5185
5186         if (list_empty(head))
5187                 return;
5188         list_for_each_entry(skb, head, list)
5189                 trace_netif_receive_skb_list_entry(skb);
5190         netif_receive_skb_list_internal(head);
5191 }
5192 EXPORT_SYMBOL(netif_receive_skb_list);
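
/* Illustrative sketch (editorial addition, not part of dev.c): batching
 * completed receive buffers on a list so the stack can amortise per-packet
 * costs.  example_rx_batch() and its arguments are hypothetical.
 *
 *	static void example_rx_batch(struct net_device *dev,
 *				     struct sk_buff **skbs, int n)
 *	{
 *		LIST_HEAD(rx_list);
 *		int i;
 *
 *		for (i = 0; i < n; i++) {
 *			skbs[i]->protocol = eth_type_trans(skbs[i], dev);
 *			list_add_tail(&skbs[i]->list, &rx_list);
 *		}
 *		netif_receive_skb_list(&rx_list);	// delivers the whole batch
 *	}
 */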
5193
5194 DEFINE_PER_CPU(struct work_struct, flush_works);
5195
5196 /* Network device is going away, flush any packets still pending */
5197 static void flush_backlog(struct work_struct *work)
5198 {
5199         struct sk_buff *skb, *tmp;
5200         struct softnet_data *sd;
5201
5202         local_bh_disable();
5203         sd = this_cpu_ptr(&softnet_data);
5204
5205         local_irq_disable();
5206         rps_lock(sd);
5207         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5208                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5209                         __skb_unlink(skb, &sd->input_pkt_queue);
5210                         kfree_skb(skb);
5211                         input_queue_head_incr(sd);
5212                 }
5213         }
5214         rps_unlock(sd);
5215         local_irq_enable();
5216
5217         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5218                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5219                         __skb_unlink(skb, &sd->process_queue);
5220                         kfree_skb(skb);
5221                         input_queue_head_incr(sd);
5222                 }
5223         }
5224         local_bh_enable();
5225 }
5226
5227 static void flush_all_backlogs(void)
5228 {
5229         unsigned int cpu;
5230
5231         get_online_cpus();
5232
5233         for_each_online_cpu(cpu)
5234                 queue_work_on(cpu, system_highpri_wq,
5235                               per_cpu_ptr(&flush_works, cpu));
5236
5237         for_each_online_cpu(cpu)
5238                 flush_work(per_cpu_ptr(&flush_works, cpu));
5239
5240         put_online_cpus();
5241 }
5242
5243 static int napi_gro_complete(struct sk_buff *skb)
5244 {
5245         struct packet_offload *ptype;
5246         __be16 type = skb->protocol;
5247         struct list_head *head = &offload_base;
5248         int err = -ENOENT;
5249
5250         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5251
5252         if (NAPI_GRO_CB(skb)->count == 1) {
5253                 skb_shinfo(skb)->gso_size = 0;
5254                 goto out;
5255         }
5256
5257         rcu_read_lock();
5258         list_for_each_entry_rcu(ptype, head, list) {
5259                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5260                         continue;
5261
5262                 err = ptype->callbacks.gro_complete(skb, 0);
5263                 break;
5264         }
5265         rcu_read_unlock();
5266
5267         if (err) {
5268                 WARN_ON(&ptype->list == head);
5269                 kfree_skb(skb);
5270                 return NET_RX_SUCCESS;
5271         }
5272
5273 out:
5274         return netif_receive_skb_internal(skb);
5275 }
5276
5277 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5278                                    bool flush_old)
5279 {
5280         struct list_head *head = &napi->gro_hash[index].list;
5281         struct sk_buff *skb, *p;
5282
5283         list_for_each_entry_safe_reverse(skb, p, head, list) {
5284                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5285                         return;
5286                 list_del(&skb->list);
5287                 skb->next = NULL;
5288                 napi_gro_complete(skb);
5289                 napi->gro_hash[index].count--;
5290         }
5291
5292         if (!napi->gro_hash[index].count)
5293                 __clear_bit(index, &napi->gro_bitmask);
5294 }
5295
5296 /* napi->gro_hash[].list contains packets ordered by age, with the
5297  * youngest packets at the head of the list.
5298  * Complete skbs in reverse order to reduce latencies.
5299  */
5300 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5301 {
5302         u32 i;
5303
5304         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
5305                 if (test_bit(i, &napi->gro_bitmask))
5306                         __napi_gro_flush_chain(napi, i, flush_old);
5307         }
5308 }
5309 EXPORT_SYMBOL(napi_gro_flush);
5310
5311 static struct list_head *gro_list_prepare(struct napi_struct *napi,
5312                                           struct sk_buff *skb)
5313 {
5314         unsigned int maclen = skb->dev->hard_header_len;
5315         u32 hash = skb_get_hash_raw(skb);
5316         struct list_head *head;
5317         struct sk_buff *p;
5318
5319         head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5320         list_for_each_entry(p, head, list) {
5321                 unsigned long diffs;
5322
5323                 NAPI_GRO_CB(p)->flush = 0;
5324
5325                 if (hash != skb_get_hash_raw(p)) {
5326                         NAPI_GRO_CB(p)->same_flow = 0;
5327                         continue;
5328                 }
5329
5330                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5331                 diffs |= p->vlan_tci ^ skb->vlan_tci;
5332                 diffs |= skb_metadata_dst_cmp(p, skb);
5333                 diffs |= skb_metadata_differs(p, skb);
5334                 if (maclen == ETH_HLEN)
5335                         diffs |= compare_ether_header(skb_mac_header(p),
5336                                                       skb_mac_header(skb));
5337                 else if (!diffs)
5338                         diffs = memcmp(skb_mac_header(p),
5339                                        skb_mac_header(skb),
5340                                        maclen);
5341                 NAPI_GRO_CB(p)->same_flow = !diffs;
5342         }
5343
5344         return head;
5345 }
5346
5347 static void skb_gro_reset_offset(struct sk_buff *skb)
5348 {
5349         const struct skb_shared_info *pinfo = skb_shinfo(skb);
5350         const skb_frag_t *frag0 = &pinfo->frags[0];
5351
5352         NAPI_GRO_CB(skb)->data_offset = 0;
5353         NAPI_GRO_CB(skb)->frag0 = NULL;
5354         NAPI_GRO_CB(skb)->frag0_len = 0;
5355
5356         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
5357             pinfo->nr_frags &&
5358             !PageHighMem(skb_frag_page(frag0))) {
5359                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5360                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5361                                                     skb_frag_size(frag0),
5362                                                     skb->end - skb->tail);
5363         }
5364 }
5365
5366 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5367 {
5368         struct skb_shared_info *pinfo = skb_shinfo(skb);
5369
5370         BUG_ON(skb->end - skb->tail < grow);
5371
5372         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5373
5374         skb->data_len -= grow;
5375         skb->tail += grow;
5376
5377         pinfo->frags[0].page_offset += grow;
5378         skb_frag_size_sub(&pinfo->frags[0], grow);
5379
5380         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5381                 skb_frag_unref(skb, 0);
5382                 memmove(pinfo->frags, pinfo->frags + 1,
5383                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5384         }
5385 }
5386
5387 static void gro_flush_oldest(struct list_head *head)
5388 {
5389         struct sk_buff *oldest;
5390
5391         oldest = list_last_entry(head, struct sk_buff, list);
5392
5393         /* We are called with head length >= MAX_GRO_SKBS, so this is
5394          * impossible.
5395          */
5396         if (WARN_ON_ONCE(!oldest))
5397                 return;
5398
5399         /* Do not adjust napi->gro_hash[].count, caller is adding a new
5400          * SKB to the chain.
5401          */
5402         list_del(&oldest->list);
5403         napi_gro_complete(oldest);
5404 }
5405
5406 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5407 {
5408         u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5409         struct list_head *head = &offload_base;
5410         struct packet_offload *ptype;
5411         __be16 type = skb->protocol;
5412         struct list_head *gro_head;
5413         struct sk_buff *pp = NULL;
5414         enum gro_result ret;
5415         int same_flow;
5416         int grow;
5417
5418         if (netif_elide_gro(skb->dev))
5419                 goto normal;
5420
5421         gro_head = gro_list_prepare(napi, skb);
5422
5423         rcu_read_lock();
5424         list_for_each_entry_rcu(ptype, head, list) {
5425                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5426                         continue;
5427
5428                 skb_set_network_header(skb, skb_gro_offset(skb));
5429                 skb_reset_mac_len(skb);
5430                 NAPI_GRO_CB(skb)->same_flow = 0;
5431                 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5432                 NAPI_GRO_CB(skb)->free = 0;
5433                 NAPI_GRO_CB(skb)->encap_mark = 0;
5434                 NAPI_GRO_CB(skb)->recursion_counter = 0;
5435                 NAPI_GRO_CB(skb)->is_fou = 0;
5436                 NAPI_GRO_CB(skb)->is_atomic = 1;
5437                 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5438
5439                 /* Setup for GRO checksum validation */
5440                 switch (skb->ip_summed) {
5441                 case CHECKSUM_COMPLETE:
5442                         NAPI_GRO_CB(skb)->csum = skb->csum;
5443                         NAPI_GRO_CB(skb)->csum_valid = 1;
5444                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5445                         break;
5446                 case CHECKSUM_UNNECESSARY:
5447                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5448                         NAPI_GRO_CB(skb)->csum_valid = 0;
5449                         break;
5450                 default:
5451                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5452                         NAPI_GRO_CB(skb)->csum_valid = 0;
5453                 }
5454
5455                 pp = ptype->callbacks.gro_receive(gro_head, skb);
5456                 break;
5457         }
5458         rcu_read_unlock();
5459
5460         if (&ptype->list == head)
5461                 goto normal;
5462
5463         if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
5464                 ret = GRO_CONSUMED;
5465                 goto ok;
5466         }
5467
5468         same_flow = NAPI_GRO_CB(skb)->same_flow;
5469         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5470
5471         if (pp) {
5472                 list_del(&pp->list);
5473                 pp->next = NULL;
5474                 napi_gro_complete(pp);
5475                 napi->gro_hash[hash].count--;
5476         }
5477
5478         if (same_flow)
5479                 goto ok;
5480
5481         if (NAPI_GRO_CB(skb)->flush)
5482                 goto normal;
5483
5484         if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5485                 gro_flush_oldest(gro_head);
5486         } else {
5487                 napi->gro_hash[hash].count++;
5488         }
5489         NAPI_GRO_CB(skb)->count = 1;
5490         NAPI_GRO_CB(skb)->age = jiffies;
5491         NAPI_GRO_CB(skb)->last = skb;
5492         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5493         list_add(&skb->list, gro_head);
5494         ret = GRO_HELD;
5495
5496 pull:
5497         grow = skb_gro_offset(skb) - skb_headlen(skb);
5498         if (grow > 0)
5499                 gro_pull_from_frag0(skb, grow);
5500 ok:
5501         if (napi->gro_hash[hash].count) {
5502                 if (!test_bit(hash, &napi->gro_bitmask))
5503                         __set_bit(hash, &napi->gro_bitmask);
5504         } else if (test_bit(hash, &napi->gro_bitmask)) {
5505                 __clear_bit(hash, &napi->gro_bitmask);
5506         }
5507
5508         return ret;
5509
5510 normal:
5511         ret = GRO_NORMAL;
5512         goto pull;
5513 }
5514
5515 struct packet_offload *gro_find_receive_by_type(__be16 type)
5516 {
5517         struct list_head *offload_head = &offload_base;
5518         struct packet_offload *ptype;
5519
5520         list_for_each_entry_rcu(ptype, offload_head, list) {
5521                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5522                         continue;
5523                 return ptype;
5524         }
5525         return NULL;
5526 }
5527 EXPORT_SYMBOL(gro_find_receive_by_type);
5528
5529 struct packet_offload *gro_find_complete_by_type(__be16 type)
5530 {
5531         struct list_head *offload_head = &offload_base;
5532         struct packet_offload *ptype;
5533
5534         list_for_each_entry_rcu(ptype, offload_head, list) {
5535                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5536                         continue;
5537                 return ptype;
5538         }
5539         return NULL;
5540 }
5541 EXPORT_SYMBOL(gro_find_complete_by_type);
5542
5543 static void napi_skb_free_stolen_head(struct sk_buff *skb)
5544 {
5545         skb_dst_drop(skb);
5546         secpath_reset(skb);
5547         kmem_cache_free(skbuff_head_cache, skb);
5548 }
5549
5550 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5551 {
5552         switch (ret) {
5553         case GRO_NORMAL:
5554                 if (netif_receive_skb_internal(skb))
5555                         ret = GRO_DROP;
5556                 break;
5557
5558         case GRO_DROP:
5559                 kfree_skb(skb);
5560                 break;
5561
5562         case GRO_MERGED_FREE:
5563                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5564                         napi_skb_free_stolen_head(skb);
5565                 else
5566                         __kfree_skb(skb);
5567                 break;
5568
5569         case GRO_HELD:
5570         case GRO_MERGED:
5571         case GRO_CONSUMED:
5572                 break;
5573         }
5574
5575         return ret;
5576 }
5577
5578 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5579 {
5580         skb_mark_napi_id(skb, napi);
5581         trace_napi_gro_receive_entry(skb);
5582
5583         skb_gro_reset_offset(skb);
5584
5585         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
5586 }
5587 EXPORT_SYMBOL(napi_gro_receive);
5588
5589 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5590 {
5591         if (unlikely(skb->pfmemalloc)) {
5592                 consume_skb(skb);
5593                 return;
5594         }
5595         __skb_pull(skb, skb_headlen(skb));
5596         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5597         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
5598         skb->vlan_tci = 0;
5599         skb->dev = napi->dev;
5600         skb->skb_iif = 0;
5601         skb->encapsulation = 0;
5602         skb_shinfo(skb)->gso_type = 0;
5603         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5604         secpath_reset(skb);
5605
5606         napi->skb = skb;
5607 }
5608
5609 struct sk_buff *napi_get_frags(struct napi_struct *napi)
5610 {
5611         struct sk_buff *skb = napi->skb;
5612
5613         if (!skb) {
5614                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
5615                 if (skb) {
5616                         napi->skb = skb;
5617                         skb_mark_napi_id(skb, napi);
5618                 }
5619         }
5620         return skb;
5621 }
5622 EXPORT_SYMBOL(napi_get_frags);
5623
5624 static gro_result_t napi_frags_finish(struct napi_struct *napi,
5625                                       struct sk_buff *skb,
5626                                       gro_result_t ret)
5627 {
5628         switch (ret) {
5629         case GRO_NORMAL:
5630         case GRO_HELD:
5631                 __skb_push(skb, ETH_HLEN);
5632                 skb->protocol = eth_type_trans(skb, skb->dev);
5633                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
5634                         ret = GRO_DROP;
5635                 break;
5636
5637         case GRO_DROP:
5638                 napi_reuse_skb(napi, skb);
5639                 break;
5640
5641         case GRO_MERGED_FREE:
5642                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5643                         napi_skb_free_stolen_head(skb);
5644                 else
5645                         napi_reuse_skb(napi, skb);
5646                 break;
5647
5648         case GRO_MERGED:
5649         case GRO_CONSUMED:
5650                 break;
5651         }
5652
5653         return ret;
5654 }
5655
5656 /* The upper GRO stack assumes the network header starts at gro_offset=0.
5657  * Drivers could call both napi_gro_frags() and napi_gro_receive().
5658  * We copy the ethernet header into skb->data to have a common layout.
5659  */
5660 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
5661 {
5662         struct sk_buff *skb = napi->skb;
5663         const struct ethhdr *eth;
5664         unsigned int hlen = sizeof(*eth);
5665
5666         napi->skb = NULL;
5667
5668         skb_reset_mac_header(skb);
5669         skb_gro_reset_offset(skb);
5670
5671         eth = skb_gro_header_fast(skb, 0);
5672         if (unlikely(skb_gro_header_hard(skb, hlen))) {
5673                 eth = skb_gro_header_slow(skb, hlen, 0);
5674                 if (unlikely(!eth)) {
5675                         net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5676                                              __func__, napi->dev->name);
5677                         napi_reuse_skb(napi, skb);
5678                         return NULL;
5679                 }
5680         } else {
5681                 gro_pull_from_frag0(skb, hlen);
5682                 NAPI_GRO_CB(skb)->frag0 += hlen;
5683                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
5684         }
5685         __skb_pull(skb, hlen);
5686
5687         /*
5688          * This works because the only protocols we care about don't require
5689          * special handling.
5690          * We'll fix it up properly in napi_frags_finish()
5691          */
5692         skb->protocol = eth->h_proto;
5693
5694         return skb;
5695 }
5696
5697 gro_result_t napi_gro_frags(struct napi_struct *napi)
5698 {
5699         struct sk_buff *skb = napi_frags_skb(napi);
5700
5701         if (!skb)
5702                 return GRO_DROP;
5703
5704         trace_napi_gro_frags_entry(skb);
5705
5706         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5707 }
5708 EXPORT_SYMBOL(napi_gro_frags);
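
/* Illustrative sketch (editorial addition, not part of dev.c): the
 * napi_get_frags() / napi_gro_frags() pairing used by drivers that receive
 * into page fragments.  The page/offset/len arguments and the truesize
 * accounting are hypothetical and simplified.
 *
 *	static void example_rx_frag(struct napi_struct *napi, struct page *page,
 *				    unsigned int offset, unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = napi_get_frags(napi);	// reuses napi->skb if present
 *		if (unlikely(!skb))
 *			return;			// caller recycles the page
 *
 *		skb_fill_page_desc(skb, 0, page, offset, len);
 *		skb->len += len;
 *		skb->data_len += len;
 *		skb->truesize += PAGE_SIZE;
 *
 *		napi_gro_frags(napi);	// ethernet header is parsed internally
 *	}
 */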
5709
5710 /* Compute the checksum from gro_offset and return the folded value
5711  * after adding in any pseudo checksum.
5712  */
5713 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5714 {
5715         __wsum wsum;
5716         __sum16 sum;
5717
5718         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5719
5720         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5721         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5722         if (likely(!sum)) {
5723                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5724                     !skb->csum_complete_sw)
5725                         netdev_rx_csum_fault(skb->dev);
5726         }
5727
5728         NAPI_GRO_CB(skb)->csum = wsum;
5729         NAPI_GRO_CB(skb)->csum_valid = 1;
5730
5731         return sum;
5732 }
5733 EXPORT_SYMBOL(__skb_gro_checksum_complete);
5734
5735 static void net_rps_send_ipi(struct softnet_data *remsd)
5736 {
5737 #ifdef CONFIG_RPS
5738         while (remsd) {
5739                 struct softnet_data *next = remsd->rps_ipi_next;
5740
5741                 if (cpu_online(remsd->cpu))
5742                         smp_call_function_single_async(remsd->cpu, &remsd->csd);
5743                 remsd = next;
5744         }
5745 #endif
5746 }
5747
5748 /*
5749  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
5750  * Note: called with local irq disabled, but exits with local irq enabled.
5751  */
5752 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5753 {
5754 #ifdef CONFIG_RPS
5755         struct softnet_data *remsd = sd->rps_ipi_list;
5756
5757         if (remsd) {
5758                 sd->rps_ipi_list = NULL;
5759
5760                 local_irq_enable();
5761
5762                 /* Send pending IPIs to kick RPS processing on remote cpus. */
5763                 net_rps_send_ipi(remsd);
5764         } else
5765 #endif
5766                 local_irq_enable();
5767 }
5768
5769 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5770 {
5771 #ifdef CONFIG_RPS
5772         return sd->rps_ipi_list != NULL;
5773 #else
5774         return false;
5775 #endif
5776 }
5777
5778 static int process_backlog(struct napi_struct *napi, int quota)
5779 {
5780         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5781         bool again = true;
5782         int work = 0;
5783
5784         /* Check if we have pending IPIs; it's better to send them now
5785          * rather than waiting for net_rx_action() to end.
5786          */
5787         if (sd_has_rps_ipi_waiting(sd)) {
5788                 local_irq_disable();
5789                 net_rps_action_and_irq_enable(sd);
5790         }
5791
5792         napi->weight = dev_rx_weight;
5793         while (again) {
5794                 struct sk_buff *skb;
5795
5796                 while ((skb = __skb_dequeue(&sd->process_queue))) {
5797                         rcu_read_lock();
5798                         __netif_receive_skb(skb);
5799                         rcu_read_unlock();
5800                         input_queue_head_incr(sd);
5801                         if (++work >= quota)
5802                                 return work;
5803
5804                 }
5805
5806                 local_irq_disable();
5807                 rps_lock(sd);
5808                 if (skb_queue_empty(&sd->input_pkt_queue)) {
5809                         /*
5810                          * Inline a custom version of __napi_complete().
5811                          * Only the current cpu owns and manipulates this napi,
5812                          * and NAPI_STATE_SCHED is the only possible flag set
5813                          * on the backlog.
5814                          * We can use a plain write instead of clear_bit(),
5815                          * and we don't need an smp_mb() memory barrier.
5816                          */
5817                         napi->state = 0;
5818                         again = false;
5819                 } else {
5820                         skb_queue_splice_tail_init(&sd->input_pkt_queue,
5821                                                    &sd->process_queue);
5822                 }
5823                 rps_unlock(sd);
5824                 local_irq_enable();
5825         }
5826
5827         return work;
5828 }
5829
5830 /**
5831  * __napi_schedule - schedule for receive
5832  * @n: entry to schedule
5833  *
5834  * The entry's receive function will be scheduled to run.
5835  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5836  */
5837 void __napi_schedule(struct napi_struct *n)
5838 {
5839         unsigned long flags;
5840
5841         local_irq_save(flags);
5842         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5843         local_irq_restore(flags);
5844 }
5845 EXPORT_SYMBOL(__napi_schedule);
5846
5847 /**
5848  *      napi_schedule_prep - check if napi can be scheduled
5849  *      @n: napi context
5850  *
5851  * Test if the NAPI routine is already running, and if not mark
5852  * it as running.  This is used as a condition variable to
5853  * ensure only one NAPI poll instance runs.  We also make
5854  * sure there is no pending NAPI disable.
5855  */
5856 bool napi_schedule_prep(struct napi_struct *n)
5857 {
5858         unsigned long val, new;
5859
5860         do {
5861                 val = READ_ONCE(n->state);
5862                 if (unlikely(val & NAPIF_STATE_DISABLE))
5863                         return false;
5864                 new = val | NAPIF_STATE_SCHED;
5865
5866                 /* Sets the STATE_MISSED bit if STATE_SCHED was already set.
5867                  * This was suggested by Alexander Duyck, as the compiler
5868                  * emits better code than:
5869                  * if (val & NAPIF_STATE_SCHED)
5870                  *     new |= NAPIF_STATE_MISSED;
5871                  */
5872                 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5873                                                    NAPIF_STATE_MISSED;
5874         } while (cmpxchg(&n->state, val, new) != val);
5875
5876         return !(val & NAPIF_STATE_SCHED);
5877 }
5878 EXPORT_SYMBOL(napi_schedule_prep);
5879
5880 /**
5881  * __napi_schedule_irqoff - schedule for receive
5882  * @n: entry to schedule
5883  *
5884  * Variant of __napi_schedule() assuming hard irqs are masked
5885  */
5886 void __napi_schedule_irqoff(struct napi_struct *n)
5887 {
5888         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5889 }
5890 EXPORT_SYMBOL(__napi_schedule_irqoff);
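
/* Illustrative sketch (editorial addition, not part of dev.c): a hypothetical
 * device interrupt handler handing work off to NAPI.  example_priv and
 * example_disable_rx_irq() are assumptions; many drivers simply call
 * napi_schedule(), which pairs napi_schedule_prep() with __napi_schedule().
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_priv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			example_disable_rx_irq(priv);	// quiesce the source
 *			__napi_schedule_irqoff(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */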
5891
5892 bool napi_complete_done(struct napi_struct *n, int work_done)
5893 {
5894         unsigned long flags, val, new;
5895
5896         /*
5897          * 1) Don't let napi dequeue from the cpu poll list
5898          *    just in case it's running on a different cpu.
5899          * 2) If we are busy polling, do nothing here; we have
5900          *    the guarantee we will be called later.
5901          */
5902         if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5903                                  NAPIF_STATE_IN_BUSY_POLL)))
5904                 return false;
5905
5906         if (n->gro_bitmask) {
5907                 unsigned long timeout = 0;
5908
5909                 if (work_done)
5910                         timeout = n->dev->gro_flush_timeout;
5911
5912                 if (timeout)
5913                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
5914                                       HRTIMER_MODE_REL_PINNED);
5915                 else
5916                         napi_gro_flush(n, false);
5917         }
5918         if (unlikely(!list_empty(&n->poll_list))) {
5919                 /* If n->poll_list is not empty, we need to mask irqs */
5920                 local_irq_save(flags);
5921                 list_del_init(&n->poll_list);
5922                 local_irq_restore(flags);
5923         }
5924
5925         do {
5926                 val = READ_ONCE(n->state);
5927
5928                 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5929
5930                 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5931
5932                 /* If STATE_MISSED was set, leave STATE_SCHED set,
5933                  * because we will call napi->poll() one more time.
5934                  * This C code was suggested by Alexander Duyck to help gcc.
5935                  */
5936                 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5937                                                     NAPIF_STATE_SCHED;
5938         } while (cmpxchg(&n->state, val, new) != val);
5939
5940         if (unlikely(val & NAPIF_STATE_MISSED)) {
5941                 __napi_schedule(n);
5942                 return false;
5943         }
5944
5945         return true;
5946 }
5947 EXPORT_SYMBOL(napi_complete_done);
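
/* Illustrative sketch (editorial addition, not part of dev.c): the usual
 * shape of a NAPI poll routine around napi_complete_done().  example_priv,
 * example_clean_rx() and example_enable_rx_irq() are hypothetical.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_priv *priv =
 *			container_of(napi, struct example_priv, napi);
 *		int work_done;
 *
 *		work_done = example_clean_rx(priv, budget);
 *
 *		// Only re-arm interrupts if NAPI really completed; a false
 *		// return means the NAPI instance is still scheduled.
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			example_enable_rx_irq(priv);
 *
 *		return work_done;
 *	}
 */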
5948
5949 /* must be called under rcu_read_lock(), as we don't take a reference */
5950 static struct napi_struct *napi_by_id(unsigned int napi_id)
5951 {
5952         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5953         struct napi_struct *napi;
5954
5955         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5956                 if (napi->napi_id == napi_id)
5957                         return napi;
5958
5959         return NULL;
5960 }
5961
5962 #if defined(CONFIG_NET_RX_BUSY_POLL)
5963
5964 #define BUSY_POLL_BUDGET 8
5965
5966 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5967 {
5968         int rc;
5969
5970         /* Busy polling means there is a high chance the device driver's hard irq
5971          * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5972          * set in napi_schedule_prep().
5973          * Since we are about to call napi->poll() once more, we can safely
5974          * clear NAPI_STATE_MISSED.
5975          *
5976          * Note: x86 could use a single "lock and ..." instruction
5977          * to perform these two clear_bit() calls.
5978          */
5979         clear_bit(NAPI_STATE_MISSED, &napi->state);
5980         clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5981
5982         local_bh_disable();
5983
5984         /* All we really want here is to re-enable device interrupts.
5985          * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5986          */
5987         rc = napi->poll(napi, BUSY_POLL_BUDGET);
5988         trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
5989         netpoll_poll_unlock(have_poll_lock);
5990         if (rc == BUSY_POLL_BUDGET)
5991                 __napi_schedule(napi);
5992         local_bh_enable();
5993 }
5994
5995 void napi_busy_loop(unsigned int napi_id,
5996                     bool (*loop_end)(void *, unsigned long),
5997                     void *loop_end_arg)
5998 {
5999         unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6000         int (*napi_poll)(struct napi_struct *napi, int budget);
6001         void *have_poll_lock = NULL;
6002         struct napi_struct *napi;
6003
6004 restart:
6005         napi_poll = NULL;
6006
6007         rcu_read_lock();
6008
6009         napi = napi_by_id(napi_id);
6010         if (!napi)
6011                 goto out;
6012
6013         preempt_disable();
6014         for (;;) {
6015                 int work = 0;
6016
6017                 local_bh_disable();
6018                 if (!napi_poll) {
6019                         unsigned long val = READ_ONCE(napi->state);
6020
6021                         /* If multiple threads are competing for this napi,
6022                          * we avoid dirtying napi->state as much as we can.
6023                          */
6024                         if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6025                                    NAPIF_STATE_IN_BUSY_POLL))
6026                                 goto count;
6027                         if (cmpxchg(&napi->state, val,
6028                                     val | NAPIF_STATE_IN_BUSY_POLL |
6029                                           NAPIF_STATE_SCHED) != val)
6030                                 goto count;
6031                         have_poll_lock = netpoll_poll_lock(napi);
6032                         napi_poll = napi->poll;
6033                 }
6034                 work = napi_poll(napi, BUSY_POLL_BUDGET);
6035                 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6036 count:
6037                 if (work > 0)
6038                         __NET_ADD_STATS(dev_net(napi->dev),
6039                                         LINUX_MIB_BUSYPOLLRXPACKETS, work);
6040                 local_bh_enable();
6041
6042                 if (!loop_end || loop_end(loop_end_arg, start_time))
6043                         break;
6044
6045                 if (unlikely(need_resched())) {
6046                         if (napi_poll)
6047                                 busy_poll_stop(napi, have_poll_lock);
6048                         preempt_enable();
6049                         rcu_read_unlock();
6050                         cond_resched();
6051                         if (loop_end(loop_end_arg, start_time))
6052                                 return;
6053                         goto restart;
6054                 }
6055                 cpu_relax();
6056         }
6057         if (napi_poll)
6058                 busy_poll_stop(napi, have_poll_lock);
6059         preempt_enable();
6060 out:
6061         rcu_read_unlock();
6062 }
6063 EXPORT_SYMBOL(napi_busy_loop);
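
/* Illustrative sketch (editorial addition, not part of dev.c): driving
 * napi_busy_loop() with a custom loop_end callback.  In-tree, the socket
 * busy-poll code does something similar with sk->sk_napi_id; the jiffies
 * deadline below is a hypothetical simplification, and the start_time
 * argument (from busy_loop_current_time()) is ignored here.
 *
 *	static bool example_loop_end(void *arg, unsigned long start_time)
 *	{
 *		unsigned long *deadline = arg;
 *
 *		return time_after(jiffies, *deadline) || need_resched();
 *	}
 *
 *	static void example_busy_poll(unsigned int napi_id)
 *	{
 *		unsigned long deadline = jiffies + usecs_to_jiffies(50);
 *
 *		napi_busy_loop(napi_id, example_loop_end, &deadline);
 *	}
 */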
6064
6065 #endif /* CONFIG_NET_RX_BUSY_POLL */
6066
6067 static void napi_hash_add(struct napi_struct *napi)
6068 {
6069         if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6070             test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6071                 return;
6072
6073         spin_lock(&napi_hash_lock);
6074
6075         /* 0..NR_CPUS range is reserved for sender_cpu use */
6076         do {
6077                 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6078                         napi_gen_id = MIN_NAPI_ID;
6079         } while (napi_by_id(napi_gen_id));
6080         napi->napi_id = napi_gen_id;
6081
6082         hlist_add_head_rcu(&napi->napi_hash_node,
6083                            &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6084
6085         spin_unlock(&napi_hash_lock);
6086 }
6087
6088 /* Warning: the caller is responsible for making sure an rcu grace period
6089  * is respected before freeing the memory containing @napi.
6090  */
6091 bool napi_hash_del(struct napi_struct *napi)
6092 {
6093         bool rcu_sync_needed = false;
6094
6095         spin_lock(&napi_hash_lock);
6096
6097         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6098                 rcu_sync_needed = true;
6099                 hlist_del_rcu(&napi->napi_hash_node);
6100         }
6101         spin_unlock(&napi_hash_lock);
6102         return rcu_sync_needed;
6103 }
6104 EXPORT_SYMBOL_GPL(napi_hash_del);
6105
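/* Hrtimer callback backing napi->timer.  The timer is typically armed when
 * the netdev's gro_flush_timeout knob is non-zero (see napi_complete_done());
 * on expiry we simply reschedule the NAPI instance so that any GRO packets
 * still held in gro_hash get flushed on the next poll.
 */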
6106 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6107 {
6108         struct napi_struct *napi;
6109
6110         napi = container_of(timer, struct napi_struct, timer);
6111
6112         /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6113          * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6114          */
6115         if (napi->gro_bitmask && !napi_disable_pending(napi) &&
6116             !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6117                 __napi_schedule_irqoff(napi);
6118
6119         return HRTIMER_NORESTART;
6120 }
6121
6122 static void init_gro_hash(struct napi_struct *napi)
6123 {
6124         int i;
6125
6126         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6127                 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6128                 napi->gro_hash[i].count = 0;
6129         }
6130         napi->gro_bitmask = 0;
6131 }
6132
6133 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6134                     int (*poll)(struct napi_struct *, int), int weight)
6135 {
6136         INIT_LIST_HEAD(&napi->poll_list);
6137         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6138         napi->timer.function = napi_watchdog;
6139         init_gro_hash(napi);
6140         napi->skb = NULL;
6141         napi->poll = poll;
6142         if (weight > NAPI_POLL_WEIGHT)
6143                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
6144                             weight, dev->name);
6145         napi->weight = weight;
6146         list_add(&napi->dev_list, &dev->napi_list);
6147         napi->dev = dev;
6148 #ifdef CONFIG_NETPOLL
6149         napi->poll_owner = -1;
6150 #endif
6151         set_bit(NAPI_STATE_SCHED, &napi->state);
6152         napi_hash_add(napi);
6153 }
6154 EXPORT_SYMBOL(netif_napi_add);
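/*
 * Illustrative sketch (hypothetical driver, not part of this file): the usual
 * pairing is netif_napi_add() at setup time, napi_enable() in ndo_open,
 * napi_schedule() from the interrupt handler and napi_complete_done() from
 * the poll callback.  mydrv_poll and priv are placeholder names.
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);		(in ndo_open)
 *	...
 *	if (napi_schedule_prep(&priv->napi))	(in the IRQ handler)
 *		__napi_schedule(&priv->napi);
 */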
6155
6156 void napi_disable(struct napi_struct *n)
6157 {
6158         might_sleep();
6159         set_bit(NAPI_STATE_DISABLE, &n->state);
6160
6161         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6162                 msleep(1);
6163         while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6164                 msleep(1);
6165
6166         hrtimer_cancel(&n->timer);
6167
6168         clear_bit(NAPI_STATE_DISABLE, &n->state);
6169 }
6170 EXPORT_SYMBOL(napi_disable);
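/*
 * Illustrative sketch (hypothetical driver): napi_disable() is the
 * counterpart of napi_enable() and must run in process context, typically
 * while quiescing the device for a reconfiguration or in ndo_stop:
 *
 *	napi_disable(&priv->napi);
 *	mydrv_stop_rx(priv);		(hypothetical hardware shutdown)
 *	napi_enable(&priv->napi);	(only if the device is brought back up)
 */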
6171
6172 static void flush_gro_hash(struct napi_struct *napi)
6173 {
6174         int i;
6175
6176         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6177                 struct sk_buff *skb, *n;
6178
6179                 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6180                         kfree_skb(skb);
6181                 napi->gro_hash[i].count = 0;
6182         }
6183 }
6184
6185 /* Must be called in process context */
6186 void netif_napi_del(struct napi_struct *napi)
6187 {
6188         might_sleep();
6189         if (napi_hash_del(napi))
6190                 synchronize_net();
6191         list_del_init(&napi->dev_list);
6192         napi_free_frags(napi);
6193
6194         flush_gro_hash(napi);
6195         napi->gro_bitmask = 0;
6196 }
6197 EXPORT_SYMBOL(netif_napi_del);
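/*
 * Illustrative teardown sketch (hypothetical driver): the instance must be
 * disabled before it is deleted, and netif_napi_del() must run in process
 * context since it may wait for an RCU grace period:
 *
 *	napi_disable(&priv->napi);
 *	netif_napi_del(&priv->napi);
 */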
6198
6199 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6200 {
6201         void *have;
6202         int work, weight;
6203
6204         list_del_init(&n->poll_list);
6205
6206         have = netpoll_poll_lock(n);
6207
6208         weight = n->weight;
6209
6210         /* This NAPI_STATE_SCHED test is for avoiding a race
6211          * with netpoll's poll_napi().  Only the entity which
6212          * obtains the lock and sees NAPI_STATE_SCHED set will
6213          * actually make the ->poll() call.  Therefore we avoid
6214          * accidentally calling ->poll() when NAPI is not scheduled.
6215          */
6216         work = 0;
6217         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6218                 work = n->poll(n, weight);
6219                 trace_napi_poll(n, work, weight);
6220         }
6221
6222         WARN_ON_ONCE(work > weight);
6223
6224         if (likely(work < weight))
6225                 goto out_unlock;
6226
6227         /* Drivers must not modify the NAPI state if they
6228          * consume the entire weight.  In such cases this code
6229          * still "owns" the NAPI instance and therefore can
6230          * move the instance around on the list at-will.
6231          */
6232         if (unlikely(napi_disable_pending(n))) {
6233                 napi_complete(n);
6234                 goto out_unlock;
6235         }
6236
6237         if (n->gro_bitmask) {
6238                 /* flush too old packets
6239                  * If HZ < 1000, flush all packets.
6240                  */
6241                 napi_gro_flush(n, HZ >= 1000);
6242         }
6243
6244         /* Some drivers may have called napi_schedule
6245          * prior to exhausting their budget.
6246          */
6247         if (unlikely(!list_empty(&n->poll_list))) {
6248                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6249                              n->dev ? n->dev->name : "backlog");
6250                 goto out_unlock;
6251         }
6252
6253         list_add_tail(&n->poll_list, repoll);
6254
6255 out_unlock:
6256         netpoll_poll_unlock(have);
6257
6258         return work;
6259 }
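/*
 * Illustrative poll callback (hypothetical driver) honouring the contract
 * checked above: never report more work than the given budget, and only call
 * napi_complete_done() when the budget was not exhausted, so that
 * net_rx_action() keeps the instance on the repoll list otherwise.
 * mydrv_clean_rx() and mydrv_enable_irq() are placeholders.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi, struct mydrv_priv, napi);
 *		int work = mydrv_clean_rx(priv, budget);
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			mydrv_enable_irq(priv);
 *		return work;
 *	}
 */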
6260
6261 static __latent_entropy void net_rx_action(struct softirq_action *h)
6262 {
6263         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6264         unsigned long time_limit = jiffies +
6265                 usecs_to_jiffies(netdev_budget_usecs);
6266         int budget = netdev_budget;
6267         LIST_HEAD(list);
6268         LIST_HEAD(repoll);
6269
6270         local_irq_disable();
6271         list_splice_init(&sd->poll_list, &list);
6272         local_irq_enable();
6273
6274         for (;;) {
6275                 struct napi_struct *n;
6276
6277                 if (list_empty(&list)) {
6278                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6279                                 goto out;
6280                         break;
6281                 }
6282
6283                 n = list_first_entry(&list, struct napi_struct, poll_list);
6284                 budget -= napi_poll(n, &repoll);
6285
6286                 /* If softirq window is exhausted then punt.
6287                  * Allow this to run for up to 2 jiffies, which gives an
6288                  * average latency of 1.5/HZ.
6289                  */
6290                 if (unlikely(budget <= 0 ||
6291                              time_after_eq(jiffies, time_limit))) {
6292                         sd->time_squeeze++;
6293                         break;
6294                 }
6295         }
6296
6297         local_irq_disable();
6298
6299         list_splice_tail_init(&sd->poll_list, &list);
6300         list_splice_tail(&repoll, &list);
6301         list_splice(&list, &sd->poll_list);
6302         if (!list_empty(&sd->poll_list))
6303                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6304
6305         net_rps_action_and_irq_enable(sd);
6306 out:
6307         __kfree_skb_flush();
6308 }
6309
6310 struct netdev_adjacent {
6311         struct net_device *dev;
6312
6313         /* upper master flag, there can only be one master device per list */
6314         bool master;
6315
6316         /* counter for the number of times this device was added to us */
6317         u16 ref_nr;
6318
6319         /* private field for the users */
6320         void *private;
6321
6322         struct list_head list;
6323         struct rcu_head rcu;
6324 };
6325
6326 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6327                                                  struct list_head *adj_list)
6328 {
6329         struct netdev_adjacent *adj;
6330
6331         list_for_each_entry(adj, adj_list, list) {
6332                 if (adj->dev == adj_dev)
6333                         return adj;
6334         }
6335         return NULL;
6336 }
6337
6338 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6339 {
6340         struct net_device *dev = data;
6341
6342         return upper_dev == dev;
6343 }
6344
6345 /**
6346  * netdev_has_upper_dev - Check if device is linked to an upper device
6347  * @dev: device
6348  * @upper_dev: upper device to check
6349  *
6350  * Find out if a device is linked to the specified upper device and return
6351  * true in case it is. Note that this walks the entire chain of upper
6352  * devices, not only the immediate one. The caller must hold the RTNL lock.
6353  */
6354 bool netdev_has_upper_dev(struct net_device *dev,
6355                           struct net_device *upper_dev)
6356 {
6357         ASSERT_RTNL();
6358
6359         return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6360                                              upper_dev);
6361 }
6362 EXPORT_SYMBOL(netdev_has_upper_dev);
6363
6364 /**
6365  * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6366  * @dev: device
6367  * @upper_dev: upper device to check
6368  *
6369  * Find out if a device is linked to the specified upper device and return
6370  * true in case it is. Note that this checks the entire upper device chain.
6371  * The caller must hold the RCU read lock.
6372  */
6373
6374 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6375                                   struct net_device *upper_dev)
6376 {
6377         return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6378                                                upper_dev);
6379 }
6380 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6381
6382 /**
6383  * netdev_has_any_upper_dev - Check if device is linked to some device
6384  * @dev: device
6385  *
6386  * Find out if a device is linked to an upper device and return true in case
6387  * it is. The caller must hold the RTNL lock.
6388  */
6389 bool netdev_has_any_upper_dev(struct net_device *dev)
6390 {
6391         ASSERT_RTNL();
6392
6393         return !list_empty(&dev->adj_list.upper);
6394 }
6395 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6396
6397 /**
6398  * netdev_master_upper_dev_get - Get master upper device
6399  * @dev: device
6400  *
6401  * Find a master upper device and return pointer to it or NULL in case
6402  * it's not there. The caller must hold the RTNL lock.
6403  */
6404 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6405 {
6406         struct netdev_adjacent *upper;
6407
6408         ASSERT_RTNL();
6409
6410         if (list_empty(&dev->adj_list.upper))
6411                 return NULL;
6412
6413         upper = list_first_entry(&dev->adj_list.upper,
6414                                  struct netdev_adjacent, list);
6415         if (likely(upper->master))
6416                 return upper->dev;
6417         return NULL;
6418 }
6419 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6420
6421 /**
6422  * netdev_has_any_lower_dev - Check if device is linked to some device
6423  * @dev: device
6424  *
6425  * Find out if a device is linked to a lower device and return true in case
6426  * it is. The caller must hold the RTNL lock.
6427  */
6428 static bool netdev_has_any_lower_dev(struct net_device *dev)
6429 {
6430         ASSERT_RTNL();
6431
6432         return !list_empty(&dev->adj_list.lower);
6433 }
6434
6435 void *netdev_adjacent_get_private(struct list_head *adj_list)
6436 {
6437         struct netdev_adjacent *adj;
6438
6439         adj = list_entry(adj_list, struct netdev_adjacent, list);
6440
6441         return adj->private;
6442 }
6443 EXPORT_SYMBOL(netdev_adjacent_get_private);
6444
6445 /**
6446  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6447  * @dev: device
6448  * @iter: list_head ** of the current position
6449  *
6450  * Gets the next device from the dev's upper list, starting from iter
6451  * position. The caller must hold RCU read lock.
6452  */
6453 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6454                                                  struct list_head **iter)
6455 {
6456         struct netdev_adjacent *upper;
6457
6458         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6459
6460         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6461
6462         if (&upper->list == &dev->adj_list.upper)
6463                 return NULL;
6464
6465         *iter = &upper->list;
6466
6467         return upper->dev;
6468 }
6469 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6470
6471 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6472                                                     struct list_head **iter)
6473 {
6474         struct netdev_adjacent *upper;
6475
6476         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6477
6478         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6479
6480         if (&upper->list == &dev->adj_list.upper)
6481                 return NULL;
6482
6483         *iter = &upper->list;
6484
6485         return upper->dev;
6486 }
6487
6488 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6489                                   int (*fn)(struct net_device *dev,
6490                                             void *data),
6491                                   void *data)
6492 {
6493         struct net_device *udev;
6494         struct list_head *iter;
6495         int ret;
6496
6497         for (iter = &dev->adj_list.upper,
6498              udev = netdev_next_upper_dev_rcu(dev, &iter);
6499              udev;
6500              udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6501                 /* first is the upper device itself */
6502                 ret = fn(udev, data);
6503                 if (ret)
6504                         return ret;
6505
6506                 /* then look at all of its upper devices */
6507                 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6508                 if (ret)
6509                         return ret;
6510         }
6511
6512         return 0;
6513 }
6514 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
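/*
 * Illustrative sketch: the caller-supplied callback receives every device in
 * the upper tree plus the opaque data pointer, and a non-zero return value
 * stops the walk.  The walk must run under rcu_read_lock() or RTNL.  The
 * counting callback below is hypothetical.
 *
 *	static int count_uppers(struct net_device *upper, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *
 *	rcu_read_lock();
 *	netdev_walk_all_upper_dev_rcu(dev, count_uppers, &n);
 *	rcu_read_unlock();
 */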
6515
6516 /**
6517  * netdev_lower_get_next_private - Get the next ->private from the
6518  *                                 lower neighbour list
6519  * @dev: device
6520  * @iter: list_head ** of the current position
6521  *
6522  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6523  * list, starting from iter position. The caller must either hold the
6524  * RTNL lock or its own locking that guarantees that the neighbour lower
6525  * list will remain unchanged.
6526  */
6527 void *netdev_lower_get_next_private(struct net_device *dev,
6528                                     struct list_head **iter)
6529 {
6530         struct netdev_adjacent *lower;
6531
6532         lower = list_entry(*iter, struct netdev_adjacent, list);
6533
6534         if (&lower->list == &dev->adj_list.lower)
6535                 return NULL;
6536
6537         *iter = lower->list.next;
6538
6539         return lower->private;
6540 }
6541 EXPORT_SYMBOL(netdev_lower_get_next_private);
6542
6543 /**
6544  * netdev_lower_get_next_private_rcu - Get the next ->private from the
6545  *                                     lower neighbour list, RCU
6546  *                                     variant
6547  * @dev: device
6548  * @iter: list_head ** of the current position
6549  *
6550  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6551  * list, starting from iter position. The caller must hold RCU read lock.
6552  */
6553 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6554                                         struct list_head **iter)
6555 {
6556         struct netdev_adjacent *lower;
6557
6558         WARN_ON_ONCE(!rcu_read_lock_held());
6559
6560         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6561
6562         if (&lower->list == &dev->adj_list.lower)
6563                 return NULL;
6564
6565         *iter = &lower->list;
6566
6567         return lower->private;
6568 }
6569 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6570
6571 /**
6572  * netdev_lower_get_next - Get the next device from the lower neighbour
6573  *                         list
6574  * @dev: device
6575  * @iter: list_head ** of the current position
6576  *
6577  * Gets the next netdev_adjacent from the dev's lower neighbour
6578  * list, starting from iter position. The caller must hold RTNL lock or
6579  * its own locking that guarantees that the neighbour lower
6580  * list will remain unchanged.
6581  */
6582 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6583 {
6584         struct netdev_adjacent *lower;
6585
6586         lower = list_entry(*iter, struct netdev_adjacent, list);
6587
6588         if (&lower->list == &dev->adj_list.lower)
6589                 return NULL;
6590
6591         *iter = lower->list.next;
6592
6593         return lower->dev;
6594 }
6595 EXPORT_SYMBOL(netdev_lower_get_next);
6596
6597 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6598                                                 struct list_head **iter)
6599 {
6600         struct netdev_adjacent *lower;
6601
6602         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6603
6604         if (&lower->list == &dev->adj_list.lower)
6605                 return NULL;
6606
6607         *iter = &lower->list;
6608
6609         return lower->dev;
6610 }
6611
6612 int netdev_walk_all_lower_dev(struct net_device *dev,
6613                               int (*fn)(struct net_device *dev,
6614                                         void *data),
6615                               void *data)
6616 {
6617         struct net_device *ldev;
6618         struct list_head *iter;
6619         int ret;
6620
6621         for (iter = &dev->adj_list.lower,
6622              ldev = netdev_next_lower_dev(dev, &iter);
6623              ldev;
6624              ldev = netdev_next_lower_dev(dev, &iter)) {
6625                 /* first is the lower device itself */
6626                 ret = fn(ldev, data);
6627                 if (ret)
6628                         return ret;
6629
6630                 /* then look at all of its lower devices */
6631                 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6632                 if (ret)
6633                         return ret;
6634         }
6635
6636         return 0;
6637 }
6638 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6639
6640 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6641                                                     struct list_head **iter)
6642 {
6643         struct netdev_adjacent *lower;
6644
6645         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6646         if (&lower->list == &dev->adj_list.lower)
6647                 return NULL;
6648
6649         *iter = &lower->list;
6650
6651         return lower->dev;
6652 }
6653
6654 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6655                                   int (*fn)(struct net_device *dev,
6656                                             void *data),
6657                                   void *data)
6658 {
6659         struct net_device *ldev;
6660         struct list_head *iter;
6661         int ret;
6662
6663         for (iter = &dev->adj_list.lower,
6664              ldev = netdev_next_lower_dev_rcu(dev, &iter);
6665              ldev;
6666              ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6667                 /* first is the lower device itself */
6668                 ret = fn(ldev, data);
6669                 if (ret)
6670                         return ret;
6671
6672                 /* then look at all of its lower devices */
6673                 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6674                 if (ret)
6675                         return ret;
6676         }
6677
6678         return 0;
6679 }
6680 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6681
6682 /**
6683  * netdev_lower_get_first_private_rcu - Get the first ->private from the
6684  *                                     lower neighbour list, RCU
6685  *                                     variant
6686  * @dev: device
6687  *
6688  * Gets the first netdev_adjacent->private from the dev's lower neighbour
6689  * list. The caller must hold RCU read lock.
6690  */
6691 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6692 {
6693         struct netdev_adjacent *lower;
6694
6695         lower = list_first_or_null_rcu(&dev->adj_list.lower,
6696                         struct netdev_adjacent, list);
6697         if (lower)
6698                 return lower->private;
6699         return NULL;
6700 }
6701 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6702
6703 /**
6704  * netdev_master_upper_dev_get_rcu - Get master upper device
6705  * @dev: device
6706  *
6707  * Find a master upper device and return pointer to it or NULL in case
6708  * it's not there. The caller must hold the RCU read lock.
6709  */
6710 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6711 {
6712         struct netdev_adjacent *upper;
6713
6714         upper = list_first_or_null_rcu(&dev->adj_list.upper,
6715                                        struct netdev_adjacent, list);
6716         if (upper && likely(upper->master))
6717                 return upper->dev;
6718         return NULL;
6719 }
6720 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6721
6722 static int netdev_adjacent_sysfs_add(struct net_device *dev,
6723                               struct net_device *adj_dev,
6724                               struct list_head *dev_list)
6725 {
6726         char linkname[IFNAMSIZ+7];
6727
6728         sprintf(linkname, dev_list == &dev->adj_list.upper ?
6729                 "upper_%s" : "lower_%s", adj_dev->name);
6730         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6731                                  linkname);
6732 }
6733 static void netdev_adjacent_sysfs_del(struct net_device *dev,
6734                                char *name,
6735                                struct list_head *dev_list)
6736 {
6737         char linkname[IFNAMSIZ+7];
6738
6739         sprintf(linkname, dev_list == &dev->adj_list.upper ?
6740                 "upper_%s" : "lower_%s", name);
6741         sysfs_remove_link(&(dev->dev.kobj), linkname);
6742 }
6743
6744 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6745                                                  struct net_device *adj_dev,
6746                                                  struct list_head *dev_list)
6747 {
6748         return (dev_list == &dev->adj_list.upper ||
6749                 dev_list == &dev->adj_list.lower) &&
6750                 net_eq(dev_net(dev), dev_net(adj_dev));
6751 }
6752
6753 static int __netdev_adjacent_dev_insert(struct net_device *dev,
6754                                         struct net_device *adj_dev,
6755                                         struct list_head *dev_list,
6756                                         void *private, bool master)
6757 {
6758         struct netdev_adjacent *adj;
6759         int ret;
6760
6761         adj = __netdev_find_adj(adj_dev, dev_list);
6762
6763         if (adj) {
6764                 adj->ref_nr += 1;
6765                 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6766                          dev->name, adj_dev->name, adj->ref_nr);
6767
6768                 return 0;
6769         }
6770
6771         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6772         if (!adj)
6773                 return -ENOMEM;
6774
6775         adj->dev = adj_dev;
6776         adj->master = master;
6777         adj->ref_nr = 1;
6778         adj->private = private;
6779         dev_hold(adj_dev);
6780
6781         pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6782                  dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
6783
6784         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
6785                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
6786                 if (ret)
6787                         goto free_adj;
6788         }
6789
6790         /* Ensure that master link is always the first item in list. */
6791         if (master) {
6792                 ret = sysfs_create_link(&(dev->dev.kobj),
6793                                         &(adj_dev->dev.kobj), "master");
6794                 if (ret)
6795                         goto remove_symlinks;
6796
6797                 list_add_rcu(&adj->list, dev_list);
6798         } else {
6799                 list_add_tail_rcu(&adj->list, dev_list);
6800         }
6801
6802         return 0;
6803
6804 remove_symlinks:
6805         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6806                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6807 free_adj:
6808         kfree(adj);
6809         dev_put(adj_dev);
6810
6811         return ret;
6812 }
6813
6814 static void __netdev_adjacent_dev_remove(struct net_device *dev,
6815                                          struct net_device *adj_dev,
6816                                          u16 ref_nr,
6817                                          struct list_head *dev_list)
6818 {
6819         struct netdev_adjacent *adj;
6820
6821         pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6822                  dev->name, adj_dev->name, ref_nr);
6823
6824         adj = __netdev_find_adj(adj_dev, dev_list);
6825
6826         if (!adj) {
6827                 pr_err("Adjacency does not exist for device %s from %s\n",
6828                        dev->name, adj_dev->name);
6829                 WARN_ON(1);
6830                 return;
6831         }
6832
6833         if (adj->ref_nr > ref_nr) {
6834                 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6835                          dev->name, adj_dev->name, ref_nr,
6836                          adj->ref_nr - ref_nr);
6837                 adj->ref_nr -= ref_nr;
6838                 return;
6839         }
6840
6841         if (adj->master)
6842                 sysfs_remove_link(&(dev->dev.kobj), "master");
6843
6844         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6845                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6846
6847         list_del_rcu(&adj->list);
6848         pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6849                  adj_dev->name, dev->name, adj_dev->name);
6850         dev_put(adj_dev);
6851         kfree_rcu(adj, rcu);
6852 }
6853
6854 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6855                                             struct net_device *upper_dev,
6856                                             struct list_head *up_list,
6857                                             struct list_head *down_list,
6858                                             void *private, bool master)
6859 {
6860         int ret;
6861
6862         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6863                                            private, master);
6864         if (ret)
6865                 return ret;
6866
6867         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
6868                                            private, false);
6869         if (ret) {
6870                 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
6871                 return ret;
6872         }
6873
6874         return 0;
6875 }
6876
6877 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6878                                                struct net_device *upper_dev,
6879                                                u16 ref_nr,
6880                                                struct list_head *up_list,
6881                                                struct list_head *down_list)
6882 {
6883         __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6884         __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
6885 }
6886
6887 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6888                                                 struct net_device *upper_dev,
6889                                                 void *private, bool master)
6890 {
6891         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6892                                                 &dev->adj_list.upper,
6893                                                 &upper_dev->adj_list.lower,
6894                                                 private, master);
6895 }
6896
6897 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6898                                                    struct net_device *upper_dev)
6899 {
6900         __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
6901                                            &dev->adj_list.upper,
6902                                            &upper_dev->adj_list.lower);
6903 }
6904
6905 static int __netdev_upper_dev_link(struct net_device *dev,
6906                                    struct net_device *upper_dev, bool master,
6907                                    void *upper_priv, void *upper_info,
6908                                    struct netlink_ext_ack *extack)
6909 {
6910         struct netdev_notifier_changeupper_info changeupper_info = {
6911                 .info = {
6912                         .dev = dev,
6913                         .extack = extack,
6914                 },
6915                 .upper_dev = upper_dev,
6916                 .master = master,
6917                 .linking = true,
6918                 .upper_info = upper_info,
6919         };
6920         struct net_device *master_dev;
6921         int ret = 0;
6922
6923         ASSERT_RTNL();
6924
6925         if (dev == upper_dev)
6926                 return -EBUSY;
6927
6928         /* To prevent loops, check that dev is not an upper device of upper_dev. */
6929         if (netdev_has_upper_dev(upper_dev, dev))
6930                 return -EBUSY;
6931
6932         if (!master) {
6933                 if (netdev_has_upper_dev(dev, upper_dev))
6934                         return -EEXIST;
6935         } else {
6936                 master_dev = netdev_master_upper_dev_get(dev);
6937                 if (master_dev)
6938                         return master_dev == upper_dev ? -EEXIST : -EBUSY;
6939         }
6940
6941         ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
6942                                             &changeupper_info.info);
6943         ret = notifier_to_errno(ret);
6944         if (ret)
6945                 return ret;
6946
6947         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
6948                                                    master);
6949         if (ret)
6950                 return ret;
6951
6952         ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
6953                                             &changeupper_info.info);
6954         ret = notifier_to_errno(ret);
6955         if (ret)
6956                 goto rollback;
6957
6958         return 0;
6959
6960 rollback:
6961         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6962
6963         return ret;
6964 }
6965
6966 /**
6967  * netdev_upper_dev_link - Add a link to the upper device
6968  * @dev: device
6969  * @upper_dev: new upper device
6970  * @extack: netlink extended ack
6971  *
6972  * Adds a link to device which is upper to this one. The caller must hold
6973  * the RTNL lock. On a failure a negative errno code is returned.
6974  * On success the reference counts are adjusted and the function
6975  * returns zero.
6976  */
6977 int netdev_upper_dev_link(struct net_device *dev,
6978                           struct net_device *upper_dev,
6979                           struct netlink_ext_ack *extack)
6980 {
6981         return __netdev_upper_dev_link(dev, upper_dev, false,
6982                                        NULL, NULL, extack);
6983 }
6984 EXPORT_SYMBOL(netdev_upper_dev_link);
6985
6986 /**
6987  * netdev_master_upper_dev_link - Add a master link to the upper device
6988  * @dev: device
6989  * @upper_dev: new upper device
6990  * @upper_priv: upper device private
6991  * @upper_info: upper info to be passed down via notifier
6992  * @extack: netlink extended ack
6993  *
6994  * Adds a link to device which is upper to this one. In this case, only
6995  * one master upper device can be linked, although other non-master devices
6996  * might be linked as well. The caller must hold the RTNL lock.
6997  * On a failure a negative errno code is returned. On success the reference
6998  * counts are adjusted and the function returns zero.
6999  */
7000 int netdev_master_upper_dev_link(struct net_device *dev,
7001                                  struct net_device *upper_dev,
7002                                  void *upper_priv, void *upper_info,
7003                                  struct netlink_ext_ack *extack)
7004 {
7005         return __netdev_upper_dev_link(dev, upper_dev, true,
7006                                        upper_priv, upper_info, extack);
7007 }
7008 EXPORT_SYMBOL(netdev_master_upper_dev_link);
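/*
 * Illustrative sketch (hypothetical bonding-style driver): enslaving a port
 * under RTNL.  "slave_priv" and "slave_info" stand in for whatever private
 * pointer and notifier payload the master wants lower drivers to see.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(port_dev, master_dev,
 *					   slave_priv, slave_info, extack);
 *	if (err)
 *		goto unwind;
 *	...
 *	netdev_upper_dev_unlink(port_dev, master_dev);	(on teardown)
 */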
7009
7010 /**
7011  * netdev_upper_dev_unlink - Removes a link to upper device
7012  * @dev: device
7013  * @upper_dev: upper device to unlink
7014  *
7015  * Removes a link to device which is upper to this one. The caller must hold
7016  * the RTNL lock.
7017  */
7018 void netdev_upper_dev_unlink(struct net_device *dev,
7019                              struct net_device *upper_dev)
7020 {
7021         struct netdev_notifier_changeupper_info changeupper_info = {
7022                 .info = {
7023                         .dev = dev,
7024                 },
7025                 .upper_dev = upper_dev,
7026                 .linking = false,
7027         };
7028
7029         ASSERT_RTNL();
7030
7031         changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7032
7033         call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7034                                       &changeupper_info.info);
7035
7036         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7037
7038         call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7039                                       &changeupper_info.info);
7040 }
7041 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7042
7043 /**
7044  * netdev_bonding_info_change - Dispatch event about slave change
7045  * @dev: device
7046  * @bonding_info: info to dispatch
7047  *
7048  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7049  * The caller must hold the RTNL lock.
7050  */
7051 void netdev_bonding_info_change(struct net_device *dev,
7052                                 struct netdev_bonding_info *bonding_info)
7053 {
7054         struct netdev_notifier_bonding_info info = {
7055                 .info.dev = dev,
7056         };
7057
7058         memcpy(&info.bonding_info, bonding_info,
7059                sizeof(struct netdev_bonding_info));
7060         call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7061                                       &info.info);
7062 }
7063 EXPORT_SYMBOL(netdev_bonding_info_change);
7064
7065 static void netdev_adjacent_add_links(struct net_device *dev)
7066 {
7067         struct netdev_adjacent *iter;
7068
7069         struct net *net = dev_net(dev);
7070
7071         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7072                 if (!net_eq(net, dev_net(iter->dev)))
7073                         continue;
7074                 netdev_adjacent_sysfs_add(iter->dev, dev,
7075                                           &iter->dev->adj_list.lower);
7076                 netdev_adjacent_sysfs_add(dev, iter->dev,
7077                                           &dev->adj_list.upper);
7078         }
7079
7080         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7081                 if (!net_eq(net, dev_net(iter->dev)))
7082                         continue;
7083                 netdev_adjacent_sysfs_add(iter->dev, dev,
7084                                           &iter->dev->adj_list.upper);
7085                 netdev_adjacent_sysfs_add(dev, iter->dev,
7086                                           &dev->adj_list.lower);
7087         }
7088 }
7089
7090 static void netdev_adjacent_del_links(struct net_device *dev)
7091 {
7092         struct netdev_adjacent *iter;
7093
7094         struct net *net = dev_net(dev);
7095
7096         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7097                 if (!net_eq(net, dev_net(iter->dev)))
7098                         continue;
7099                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7100                                           &iter->dev->adj_list.lower);
7101                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7102                                           &dev->adj_list.upper);
7103         }
7104
7105         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7106                 if (!net_eq(net, dev_net(iter->dev)))
7107                         continue;
7108                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7109                                           &iter->dev->adj_list.upper);
7110                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7111                                           &dev->adj_list.lower);
7112         }
7113 }
7114
7115 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7116 {
7117         struct netdev_adjacent *iter;
7118
7119         struct net *net = dev_net(dev);
7120
7121         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7122                 if (!net_eq(net, dev_net(iter->dev)))
7123                         continue;
7124                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7125                                           &iter->dev->adj_list.lower);
7126                 netdev_adjacent_sysfs_add(iter->dev, dev,
7127                                           &iter->dev->adj_list.lower);
7128         }
7129
7130         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7131                 if (!net_eq(net, dev_net(iter->dev)))
7132                         continue;
7133                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7134                                           &iter->dev->adj_list.upper);
7135                 netdev_adjacent_sysfs_add(iter->dev, dev,
7136                                           &iter->dev->adj_list.upper);
7137         }
7138 }
7139
7140 void *netdev_lower_dev_get_private(struct net_device *dev,
7141                                    struct net_device *lower_dev)
7142 {
7143         struct netdev_adjacent *lower;
7144
7145         if (!lower_dev)
7146                 return NULL;
7147         lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7148         if (!lower)
7149                 return NULL;
7150
7151         return lower->private;
7152 }
7153 EXPORT_SYMBOL(netdev_lower_dev_get_private);
7154
7155
7156 int dev_get_nest_level(struct net_device *dev)
7157 {
7158         struct net_device *lower = NULL;
7159         struct list_head *iter;
7160         int max_nest = -1;
7161         int nest;
7162
7163         ASSERT_RTNL();
7164
7165         netdev_for_each_lower_dev(dev, lower, iter) {
7166                 nest = dev_get_nest_level(lower);
7167                 if (max_nest < nest)
7168                         max_nest = nest;
7169         }
7170
7171         return max_nest + 1;
7172 }
7173 EXPORT_SYMBOL(dev_get_nest_level);
7174
7175 /**
7176  * netdev_lower_state_changed - Dispatch event about lower device state change
7177  * @lower_dev: device
7178  * @lower_state_info: state to dispatch
7179  *
7180  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
7181  * The caller must hold the RTNL lock.
7182  */
7183 void netdev_lower_state_changed(struct net_device *lower_dev,
7184                                 void *lower_state_info)
7185 {
7186         struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7187                 .info.dev = lower_dev,
7188         };
7189
7190         ASSERT_RTNL();
7191         changelowerstate_info.lower_state_info = lower_state_info;
7192         call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7193                                       &changelowerstate_info.info);
7194 }
7195 EXPORT_SYMBOL(netdev_lower_state_changed);
7196
7197 static void dev_change_rx_flags(struct net_device *dev, int flags)
7198 {
7199         const struct net_device_ops *ops = dev->netdev_ops;
7200
7201         if (ops->ndo_change_rx_flags)
7202                 ops->ndo_change_rx_flags(dev, flags);
7203 }
7204
7205 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7206 {
7207         unsigned int old_flags = dev->flags;
7208         kuid_t uid;
7209         kgid_t gid;
7210
7211         ASSERT_RTNL();
7212
7213         dev->flags |= IFF_PROMISC;
7214         dev->promiscuity += inc;
7215         if (dev->promiscuity == 0) {
7216                 /*
7217                  * Avoid overflow.
7218                  * If inc caused the counter to wrap, undo the change and return an error.
7219                  */
7220                 if (inc < 0)
7221                         dev->flags &= ~IFF_PROMISC;
7222                 else {
7223                         dev->promiscuity -= inc;
7224                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7225                                 dev->name);
7226                         return -EOVERFLOW;
7227                 }
7228         }
7229         if (dev->flags != old_flags) {
7230                 pr_info("device %s %s promiscuous mode\n",
7231                         dev->name,
7232                         dev->flags & IFF_PROMISC ? "entered" : "left");
7233                 if (audit_enabled) {
7234                         current_uid_gid(&uid, &gid);
7235                         audit_log(audit_context(), GFP_ATOMIC,
7236                                   AUDIT_ANOM_PROMISCUOUS,
7237                                   "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7238                                   dev->name, (dev->flags & IFF_PROMISC),
7239                                   (old_flags & IFF_PROMISC),
7240                                   from_kuid(&init_user_ns, audit_get_loginuid(current)),
7241                                   from_kuid(&init_user_ns, uid),
7242                                   from_kgid(&init_user_ns, gid),
7243                                   audit_get_sessionid(current));
7244                 }
7245
7246                 dev_change_rx_flags(dev, IFF_PROMISC);
7247         }
7248         if (notify)
7249                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
7250         return 0;
7251 }
7252
7253 /**
7254  *      dev_set_promiscuity     - update promiscuity count on a device
7255  *      @dev: device
7256  *      @inc: modifier
7257  *
7258  *      Add or remove promiscuity from a device. While the count in the device
7259  *      remains above zero the interface remains promiscuous. Once it hits zero
7260  *      the device reverts back to normal filtering operation. A negative inc
7261  *      value is used to drop promiscuity on the device.
7262  *      Return 0 if successful or a negative errno code on error.
7263  */
7264 int dev_set_promiscuity(struct net_device *dev, int inc)
7265 {
7266         unsigned int old_flags = dev->flags;
7267         int err;
7268
7269         err = __dev_set_promiscuity(dev, inc, true);
7270         if (err < 0)
7271                 return err;
7272         if (dev->flags != old_flags)
7273                 dev_set_rx_mode(dev);
7274         return err;
7275 }
7276 EXPORT_SYMBOL(dev_set_promiscuity);
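/*
 * Illustrative sketch: a packet-capture style user bumps the reference on
 * attach and drops it on detach, always under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */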
7277
7278 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7279 {
7280         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7281
7282         ASSERT_RTNL();
7283
7284         dev->flags |= IFF_ALLMULTI;
7285         dev->allmulti += inc;
7286         if (dev->allmulti == 0) {
7287                 /*
7288                  * Avoid overflow.
7289                  * If inc caused the counter to wrap, undo the change and return an error.
7290                  */
7291                 if (inc < 0)
7292                         dev->flags &= ~IFF_ALLMULTI;
7293                 else {
7294                         dev->allmulti -= inc;
7295                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
7296                                 dev->name);
7297                         return -EOVERFLOW;
7298                 }
7299         }
7300         if (dev->flags ^ old_flags) {
7301                 dev_change_rx_flags(dev, IFF_ALLMULTI);
7302                 dev_set_rx_mode(dev);
7303                 if (notify)
7304                         __dev_notify_flags(dev, old_flags,
7305                                            dev->gflags ^ old_gflags);
7306         }
7307         return 0;
7308 }
7309
7310 /**
7311  *      dev_set_allmulti        - update allmulti count on a device
7312  *      @dev: device
7313  *      @inc: modifier
7314  *
7315  *      Add or remove reception of all multicast frames to a device. While the
7316  *      count in the device remains above zero the interface remains listening
7317  *      to all multicast frames. Once it hits zero the device reverts back to normal
7318  *      filtering operation. A negative @inc value is used to drop the counter
7319  *      when releasing a resource needing all multicasts.
7320  *      Return 0 if successful or a negative errno code on error.
7321  */
7322
7323 int dev_set_allmulti(struct net_device *dev, int inc)
7324 {
7325         return __dev_set_allmulti(dev, inc, true);
7326 }
7327 EXPORT_SYMBOL(dev_set_allmulti);
7328
7329 /*
7330  *      Upload unicast and multicast address lists to device and
7331  *      configure RX filtering. When the device doesn't support unicast
7332  *      filtering it is put in promiscuous mode while unicast addresses
7333  *      are present.
7334  */
7335 void __dev_set_rx_mode(struct net_device *dev)
7336 {
7337         const struct net_device_ops *ops = dev->netdev_ops;
7338
7339         /* dev_open will call this function so the list will stay sane. */
7340         if (!(dev->flags&IFF_UP))
7341                 return;
7342
7343         if (!netif_device_present(dev))
7344                 return;
7345
7346         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
7347                 /* Unicast address changes may only happen under the rtnl,
7348                  * therefore calling __dev_set_promiscuity here is safe.
7349                  */
7350                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
7351                         __dev_set_promiscuity(dev, 1, false);
7352                         dev->uc_promisc = true;
7353                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
7354                         __dev_set_promiscuity(dev, -1, false);
7355                         dev->uc_promisc = false;
7356                 }
7357         }
7358
7359         if (ops->ndo_set_rx_mode)
7360                 ops->ndo_set_rx_mode(dev);
7361 }
7362
7363 void dev_set_rx_mode(struct net_device *dev)
7364 {
7365         netif_addr_lock_bh(dev);
7366         __dev_set_rx_mode(dev);
7367         netif_addr_unlock_bh(dev);
7368 }
7369
7370 /**
7371  *      dev_get_flags - get flags reported to userspace
7372  *      @dev: device
7373  *
7374  *      Get the combination of flag bits exported through APIs to userspace.
7375  */
7376 unsigned int dev_get_flags(const struct net_device *dev)
7377 {
7378         unsigned int flags;
7379
7380         flags = (dev->flags & ~(IFF_PROMISC |
7381                                 IFF_ALLMULTI |
7382                                 IFF_RUNNING |
7383                                 IFF_LOWER_UP |
7384                                 IFF_DORMANT)) |
7385                 (dev->gflags & (IFF_PROMISC |
7386                                 IFF_ALLMULTI));
7387
7388         if (netif_running(dev)) {
7389                 if (netif_oper_up(dev))
7390                         flags |= IFF_RUNNING;
7391                 if (netif_carrier_ok(dev))
7392                         flags |= IFF_LOWER_UP;
7393                 if (netif_dormant(dev))
7394                         flags |= IFF_DORMANT;
7395         }
7396
7397         return flags;
7398 }
7399 EXPORT_SYMBOL(dev_get_flags);
7400
7401 int __dev_change_flags(struct net_device *dev, unsigned int flags)
7402 {
7403         unsigned int old_flags = dev->flags;
7404         int ret;
7405
7406         ASSERT_RTNL();
7407
7408         /*
7409          *      Set the flags on our device.
7410          */
7411
7412         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
7413                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
7414                                IFF_AUTOMEDIA)) |
7415                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
7416                                     IFF_ALLMULTI));
7417
7418         /*
7419          *      Load in the correct multicast list now that the flags have changed.
7420          */
7421
7422         if ((old_flags ^ flags) & IFF_MULTICAST)
7423                 dev_change_rx_flags(dev, IFF_MULTICAST);
7424
7425         dev_set_rx_mode(dev);
7426
7427         /*
7428          *      Have we downed the interface? We handle IFF_UP ourselves
7429          *      according to user attempts to set it, rather than blindly
7430          *      setting it.
7431          */
7432
7433         ret = 0;
7434         if ((old_flags ^ flags) & IFF_UP) {
7435                 if (old_flags & IFF_UP)
7436                         __dev_close(dev);
7437                 else
7438                         ret = __dev_open(dev);
7439         }
7440
7441         if ((flags ^ dev->gflags) & IFF_PROMISC) {
7442                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
7443                 unsigned int old_flags = dev->flags;
7444
7445                 dev->gflags ^= IFF_PROMISC;
7446
7447                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
7448                         if (dev->flags != old_flags)
7449                                 dev_set_rx_mode(dev);
7450         }
7451
7452         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
7453          * is important. Some (broken) drivers set IFF_PROMISC when
7454          * IFF_ALLMULTI is requested, without asking us and without reporting it.
7455          */
7456         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
7457                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
7458
7459                 dev->gflags ^= IFF_ALLMULTI;
7460                 __dev_set_allmulti(dev, inc, false);
7461         }
7462
7463         return ret;
7464 }
7465
7466 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
7467                         unsigned int gchanges)
7468 {
7469         unsigned int changes = dev->flags ^ old_flags;
7470
7471         if (gchanges)
7472                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
7473
7474         if (changes & IFF_UP) {
7475                 if (dev->flags & IFF_UP)
7476                         call_netdevice_notifiers(NETDEV_UP, dev);
7477                 else
7478                         call_netdevice_notifiers(NETDEV_DOWN, dev);
7479         }
7480
7481         if (dev->flags & IFF_UP &&
7482             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
7483                 struct netdev_notifier_change_info change_info = {
7484                         .info = {
7485                                 .dev = dev,
7486                         },
7487                         .flags_changed = changes,
7488                 };
7489
7490                 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
7491         }
7492 }
7493
7494 /**
7495  *      dev_change_flags - change device settings
7496  *      @dev: device
7497  *      @flags: device state flags
7498  *
7499  *      Change settings on device based state flags. The flags are
7500  *      in the userspace exported format.
7501  */
7502 int dev_change_flags(struct net_device *dev, unsigned int flags)
7503 {
7504         int ret;
7505         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
7506
7507         ret = __dev_change_flags(dev, flags);
7508         if (ret < 0)
7509                 return ret;
7510
7511         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
7512         __dev_notify_flags(dev, old_flags, changes);
7513         return ret;
7514 }
7515 EXPORT_SYMBOL(dev_change_flags);
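/*
 * Illustrative sketch: bringing an interface up from inside the kernel using
 * the userspace-visible flag format, under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */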
7516
7517 int __dev_set_mtu(struct net_device *dev, int new_mtu)
7518 {
7519         const struct net_device_ops *ops = dev->netdev_ops;
7520
7521         if (ops->ndo_change_mtu)
7522                 return ops->ndo_change_mtu(dev, new_mtu);
7523
7524         dev->mtu = new_mtu;
7525         return 0;
7526 }
7527 EXPORT_SYMBOL(__dev_set_mtu);
7528
7529 /**
7530  *      dev_set_mtu_ext - Change maximum transfer unit
7531  *      @dev: device
7532  *      @new_mtu: new transfer unit
7533  *      @extack: netlink extended ack
7534  *
7535  *      Change the maximum transfer size of the network device.
7536  */
7537 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
7538                     struct netlink_ext_ack *extack)
7539 {
7540         int err, orig_mtu;
7541
7542         if (new_mtu == dev->mtu)
7543                 return 0;
7544
7545         /* MTU must be positive, and in range */
7546         if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7547                 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
7548                 return -EINVAL;
7549         }
7550
7551         if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7552                 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
7553                 return -EINVAL;
7554         }
7555
7556         if (!netif_device_present(dev))
7557                 return -ENODEV;
7558
7559         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7560         err = notifier_to_errno(err);
7561         if (err)
7562                 return err;
7563
7564         orig_mtu = dev->mtu;
7565         err = __dev_set_mtu(dev, new_mtu);
7566
7567         if (!err) {
7568                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7569                 err = notifier_to_errno(err);
7570                 if (err) {
7571                         /* setting mtu back and notifying everyone again,
7572                          * so that they have a chance to revert changes.
7573                          */
7574                         __dev_set_mtu(dev, orig_mtu);
7575                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7576                 }
7577         }
7578         return err;
7579 }
7580
7581 int dev_set_mtu(struct net_device *dev, int new_mtu)
7582 {
7583         struct netlink_ext_ack extack = {};    /* _msg must not be read uninitialized */
7584         int err;
7585
7586         err = dev_set_mtu_ext(dev, new_mtu, &extack);
7587         if (err && extack._msg)
7588                 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
7589         return err;
7590 }
7591 EXPORT_SYMBOL(dev_set_mtu);
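/*
 * Illustrative sketch: a tunnel-style driver adjusting the MTU of its lower
 * device, under RTNL.  Errors may come from the range checks above or from
 * the NETDEV_PRECHANGEMTU/NETDEV_CHANGEMTU notifier chain.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(lower_dev, new_mtu);
 *	rtnl_unlock();
 */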
7592
7593 /**
7594  *      dev_change_tx_queue_len - Change TX queue length of a netdevice
7595  *      @dev: device
7596  *      @new_len: new tx queue length
7597  */
7598 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7599 {
7600         unsigned int orig_len = dev->tx_queue_len;
7601         int res;
7602
7603         if (new_len != (unsigned int)new_len)
7604                 return -ERANGE;
7605
7606         if (new_len != orig_len) {
7607                 dev->tx_queue_len = new_len;
7608                 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7609                 res = notifier_to_errno(res);
7610                 if (res)
7611                         goto err_rollback;
7612                 res = dev_qdisc_change_tx_queue_len(dev);
7613                 if (res)
7614                         goto err_rollback;
7615         }
7616
7617         return 0;
7618
7619 err_rollback:
7620         netdev_err(dev, "refused to change device tx_queue_len\n");
7621         dev->tx_queue_len = orig_len;
7622         return res;
7623 }
7624
7625 /**
7626  *      dev_set_group - Change group this device belongs to
7627  *      @dev: device
7628  *      @new_group: group this device should belong to
7629  */
7630 void dev_set_group(struct net_device *dev, int new_group)
7631 {
7632         dev->group = new_group;
7633 }
7634 EXPORT_SYMBOL(dev_set_group);
7635
7636 /**
7637  *      dev_set_mac_address - Change Media Access Control Address
7638  *      @dev: device
7639  *      @sa: new address
7640  *
7641  *      Change the hardware (MAC) address of the device
7642  */
7643 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
7644 {
7645         const struct net_device_ops *ops = dev->netdev_ops;
7646         int err;
7647
7648         if (!ops->ndo_set_mac_address)
7649                 return -EOPNOTSUPP;
7650         if (sa->sa_family != dev->type)
7651                 return -EINVAL;
7652         if (!netif_device_present(dev))
7653                 return -ENODEV;
7654         err = ops->ndo_set_mac_address(dev, sa);
7655         if (err)
7656                 return err;
7657         dev->addr_assign_type = NET_ADDR_SET;
7658         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7659         add_device_randomness(dev->dev_addr, dev->addr_len);
7660         return 0;
7661 }
7662 EXPORT_SYMBOL(dev_set_mac_address);
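
/* Illustrative only (editor's sketch, assumes an Ethernet device and a caller
 * holding rtnl; "new_mac" is a hypothetical u8[ETH_ALEN] buffer): the new
 * address is passed as a struct sockaddr whose family must match dev->type:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	err = dev_set_mac_address(dev, &sa);
 */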
7663
7664 /**
7665  *      dev_change_carrier - Change device carrier
7666  *      @dev: device
7667  *      @new_carrier: new value
7668  *
7669  *      Change device carrier
7670  */
7671 int dev_change_carrier(struct net_device *dev, bool new_carrier)
7672 {
7673         const struct net_device_ops *ops = dev->netdev_ops;
7674
7675         if (!ops->ndo_change_carrier)
7676                 return -EOPNOTSUPP;
7677         if (!netif_device_present(dev))
7678                 return -ENODEV;
7679         return ops->ndo_change_carrier(dev, new_carrier);
7680 }
7681 EXPORT_SYMBOL(dev_change_carrier);
7682
7683 /**
7684  *      dev_get_phys_port_id - Get device physical port ID
7685  *      @dev: device
7686  *      @ppid: port ID
7687  *
7688  *      Get device physical port ID
7689  */
7690 int dev_get_phys_port_id(struct net_device *dev,
7691                          struct netdev_phys_item_id *ppid)
7692 {
7693         const struct net_device_ops *ops = dev->netdev_ops;
7694
7695         if (!ops->ndo_get_phys_port_id)
7696                 return -EOPNOTSUPP;
7697         return ops->ndo_get_phys_port_id(dev, ppid);
7698 }
7699 EXPORT_SYMBOL(dev_get_phys_port_id);
7700
7701 /**
7702  *      dev_get_phys_port_name - Get device physical port name
7703  *      @dev: device
7704  *      @name: port name
7705  *      @len: limit of bytes to copy to name
7706  *
7707  *      Get device physical port name
7708  */
7709 int dev_get_phys_port_name(struct net_device *dev,
7710                            char *name, size_t len)
7711 {
7712         const struct net_device_ops *ops = dev->netdev_ops;
7713
7714         if (!ops->ndo_get_phys_port_name)
7715                 return -EOPNOTSUPP;
7716         return ops->ndo_get_phys_port_name(dev, name, len);
7717 }
7718 EXPORT_SYMBOL(dev_get_phys_port_name);
7719
7720 /**
7721  *      dev_change_proto_down - update protocol port state information
7722  *      @dev: device
7723  *      @proto_down: new value
7724  *
7725  *      This info can be used by switch drivers to set the phys state of the
7726  *      port.
7727  */
7728 int dev_change_proto_down(struct net_device *dev, bool proto_down)
7729 {
7730         const struct net_device_ops *ops = dev->netdev_ops;
7731
7732         if (!ops->ndo_change_proto_down)
7733                 return -EOPNOTSUPP;
7734         if (!netif_device_present(dev))
7735                 return -ENODEV;
7736         return ops->ndo_change_proto_down(dev, proto_down);
7737 }
7738 EXPORT_SYMBOL(dev_change_proto_down);
7739
7740 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7741                     enum bpf_netdev_command cmd)
7742 {
7743         struct netdev_bpf xdp;
7744
7745         if (!bpf_op)
7746                 return 0;
7747
7748         memset(&xdp, 0, sizeof(xdp));
7749         xdp.command = cmd;
7750
7751         /* Query must always succeed. */
7752         WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
7753
7754         return xdp.prog_id;
7755 }
7756
7757 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
7758                            struct netlink_ext_ack *extack, u32 flags,
7759                            struct bpf_prog *prog)
7760 {
7761         struct netdev_bpf xdp;
7762
7763         memset(&xdp, 0, sizeof(xdp));
7764         if (flags & XDP_FLAGS_HW_MODE)
7765                 xdp.command = XDP_SETUP_PROG_HW;
7766         else
7767                 xdp.command = XDP_SETUP_PROG;
7768         xdp.extack = extack;
7769         xdp.flags = flags;
7770         xdp.prog = prog;
7771
7772         return bpf_op(dev, &xdp);
7773 }
7774
7775 static void dev_xdp_uninstall(struct net_device *dev)
7776 {
7777         struct netdev_bpf xdp;
7778         bpf_op_t ndo_bpf;
7779
7780         /* Remove generic XDP */
7781         WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
7782
7783         /* Remove from the driver */
7784         ndo_bpf = dev->netdev_ops->ndo_bpf;
7785         if (!ndo_bpf)
7786                 return;
7787
7788         memset(&xdp, 0, sizeof(xdp));
7789         xdp.command = XDP_QUERY_PROG;
7790         WARN_ON(ndo_bpf(dev, &xdp));
7791         if (xdp.prog_id)
7792                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7793                                         NULL));
7794
7795         /* Remove HW offload */
7796         memset(&xdp, 0, sizeof(xdp));
7797         xdp.command = XDP_QUERY_PROG_HW;
7798         if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
7799                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7800                                         NULL));
7801 }
7802
7803 /**
7804  *      dev_change_xdp_fd - set or clear a bpf program for a device rx path
7805  *      @dev: device
7806  *      @extack: netlink extended ack
7807  *      @fd: new program fd or negative value to clear
7808  *      @flags: xdp-related flags
7809  *
7810  *      Set or clear a bpf program for a device
7811  */
7812 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7813                       int fd, u32 flags)
7814 {
7815         const struct net_device_ops *ops = dev->netdev_ops;
7816         enum bpf_netdev_command query;
7817         struct bpf_prog *prog = NULL;
7818         bpf_op_t bpf_op, bpf_chk;
7819         int err;
7820
7821         ASSERT_RTNL();
7822
7823         query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
7824
7825         bpf_op = bpf_chk = ops->ndo_bpf;
7826         if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
7827                 return -EOPNOTSUPP;
7828         if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7829                 bpf_op = generic_xdp_install;
7830         if (bpf_op == bpf_chk)
7831                 bpf_chk = generic_xdp_install;
7832
7833         if (fd >= 0) {
7834                 if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
7835                     __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
7836                         return -EEXIST;
7837                 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
7838                     __dev_xdp_query(dev, bpf_op, query))
7839                         return -EBUSY;
7840
7841                 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7842                                              bpf_op == ops->ndo_bpf);
7843                 if (IS_ERR(prog))
7844                         return PTR_ERR(prog);
7845
7846                 if (!(flags & XDP_FLAGS_HW_MODE) &&
7847                     bpf_prog_is_dev_bound(prog->aux)) {
7848                         NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7849                         bpf_prog_put(prog);
7850                         return -EINVAL;
7851                 }
7852         }
7853
7854         err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
7855         if (err < 0 && prog)
7856                 bpf_prog_put(prog);
7857
7858         return err;
7859 }
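
/* Illustrative only (editor's sketch): the rtnetlink IFLA_XDP handler is the
 * in-tree caller. Conceptually, attaching a program in generic (skb) mode and
 * detaching it again look like
 *
 *	err = dev_change_xdp_fd(dev, extack, prog_fd, XDP_FLAGS_SKB_MODE);
 *	...
 *	err = dev_change_xdp_fd(dev, extack, -1, XDP_FLAGS_SKB_MODE);
 *
 * with the rtnl semaphore held; a negative fd clears the program.
 */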
7860
7861 /**
7862  *      dev_new_index   -       allocate an ifindex
7863  *      @net: the applicable net namespace
7864  *
7865  *      Returns a suitable unique value for a new device interface
7866  *      number.  The caller must hold the rtnl semaphore or the
7867  *      dev_base_lock to be sure it remains unique.
7868  */
7869 static int dev_new_index(struct net *net)
7870 {
7871         int ifindex = net->ifindex;
7872
7873         for (;;) {
7874                 if (++ifindex <= 0)
7875                         ifindex = 1;
7876                 if (!__dev_get_by_index(net, ifindex))
7877                         return net->ifindex = ifindex;
7878         }
7879 }
7880
7881 /* Delayed registration/unregistration */
7882 static LIST_HEAD(net_todo_list);
7883 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
7884
7885 static void net_set_todo(struct net_device *dev)
7886 {
7887         list_add_tail(&dev->todo_list, &net_todo_list);
7888         dev_net(dev)->dev_unreg_count++;
7889 }
7890
7891 static void rollback_registered_many(struct list_head *head)
7892 {
7893         struct net_device *dev, *tmp;
7894         LIST_HEAD(close_head);
7895
7896         BUG_ON(dev_boot_phase);
7897         ASSERT_RTNL();
7898
7899         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
7900                 /* Some devices call without registering
7901                  * for initialization unwind. Remove those
7902                  * devices and proceed with the remaining.
7903                  */
7904                 if (dev->reg_state == NETREG_UNINITIALIZED) {
7905                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7906                                  dev->name, dev);
7907
7908                         WARN_ON(1);
7909                         list_del(&dev->unreg_list);
7910                         continue;
7911                 }
7912                 dev->dismantle = true;
7913                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
7914         }
7915
7916         /* If device is running, close it first. */
7917         list_for_each_entry(dev, head, unreg_list)
7918                 list_add_tail(&dev->close_list, &close_head);
7919         dev_close_many(&close_head, true);
7920
7921         list_for_each_entry(dev, head, unreg_list) {
7922                 /* And unlink it from device chain. */
7923                 unlist_netdevice(dev);
7924
7925                 dev->reg_state = NETREG_UNREGISTERING;
7926         }
7927         flush_all_backlogs();
7928
7929         synchronize_net();
7930
7931         list_for_each_entry(dev, head, unreg_list) {
7932                 struct sk_buff *skb = NULL;
7933
7934                 /* Shutdown queueing discipline. */
7935                 dev_shutdown(dev);
7936
7937                 dev_xdp_uninstall(dev);
7938
7939                 /* Notify protocols that we are about to destroy
7940                  * this device. They should clean all the things.
7941                  */
7942                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7943
7944                 if (!dev->rtnl_link_ops ||
7945                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7946                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
7947                                                      GFP_KERNEL, NULL, 0);
7948
7949                 /*
7950                  *      Flush the unicast and multicast chains
7951                  */
7952                 dev_uc_flush(dev);
7953                 dev_mc_flush(dev);
7954
7955                 if (dev->netdev_ops->ndo_uninit)
7956                         dev->netdev_ops->ndo_uninit(dev);
7957
7958                 if (skb)
7959                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
7960
7961                 /* Notifier chain MUST detach us from all upper/lower devices. */
7962                 WARN_ON(netdev_has_any_upper_dev(dev));
7963                 WARN_ON(netdev_has_any_lower_dev(dev));
7964
7965                 /* Remove entries from kobject tree */
7966                 netdev_unregister_kobject(dev);
7967 #ifdef CONFIG_XPS
7968                 /* Remove XPS queueing entries */
7969                 netif_reset_xps_queues_gt(dev, 0);
7970 #endif
7971         }
7972
7973         synchronize_net();
7974
7975         list_for_each_entry(dev, head, unreg_list)
7976                 dev_put(dev);
7977 }
7978
7979 static void rollback_registered(struct net_device *dev)
7980 {
7981         LIST_HEAD(single);
7982
7983         list_add(&dev->unreg_list, &single);
7984         rollback_registered_many(&single);
7985         list_del(&single);
7986 }
7987
7988 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7989         struct net_device *upper, netdev_features_t features)
7990 {
7991         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7992         netdev_features_t feature;
7993         int feature_bit;
7994
7995         for_each_netdev_feature(&upper_disables, feature_bit) {
7996                 feature = __NETIF_F_BIT(feature_bit);
7997                 if (!(upper->wanted_features & feature)
7998                     && (features & feature)) {
7999                         netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
8000                                    &feature, upper->name);
8001                         features &= ~feature;
8002                 }
8003         }
8004
8005         return features;
8006 }
8007
8008 static void netdev_sync_lower_features(struct net_device *upper,
8009         struct net_device *lower, netdev_features_t features)
8010 {
8011         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
8012         netdev_features_t feature;
8013         int feature_bit;
8014
8015         for_each_netdev_feature(&upper_disables, feature_bit) {
8016                 feature = __NETIF_F_BIT(feature_bit);
8017                 if (!(features & feature) && (lower->features & feature)) {
8018                         netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
8019                                    &feature, lower->name);
8020                         lower->wanted_features &= ~feature;
8021                         netdev_update_features(lower);
8022
8023                         if (unlikely(lower->features & feature))
8024                                 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
8025                                             &feature, lower->name);
8026                 }
8027         }
8028 }
8029
8030 static netdev_features_t netdev_fix_features(struct net_device *dev,
8031         netdev_features_t features)
8032 {
8033         /* Fix illegal checksum combinations */
8034         if ((features & NETIF_F_HW_CSUM) &&
8035             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
8036                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
8037                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
8038         }
8039
8040         /* TSO requires that SG is present as well. */
8041         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
8042                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
8043                 features &= ~NETIF_F_ALL_TSO;
8044         }
8045
8046         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
8047                                         !(features & NETIF_F_IP_CSUM)) {
8048                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
8049                 features &= ~NETIF_F_TSO;
8050                 features &= ~NETIF_F_TSO_ECN;
8051         }
8052
8053         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
8054                                          !(features & NETIF_F_IPV6_CSUM)) {
8055                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
8056                 features &= ~NETIF_F_TSO6;
8057         }
8058
8059         /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
8060         if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
8061                 features &= ~NETIF_F_TSO_MANGLEID;
8062
8063         /* TSO ECN requires that TSO is present as well. */
8064         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
8065                 features &= ~NETIF_F_TSO_ECN;
8066
8067         /* Software GSO depends on SG. */
8068         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
8069                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
8070                 features &= ~NETIF_F_GSO;
8071         }
8072
8073         /* GSO partial features require GSO partial be set */
8074         if ((features & dev->gso_partial_features) &&
8075             !(features & NETIF_F_GSO_PARTIAL)) {
8076                 netdev_dbg(dev,
8077                            "Dropping partially supported GSO features since no GSO partial.\n");
8078                 features &= ~dev->gso_partial_features;
8079         }
8080
8081         if (!(features & NETIF_F_RXCSUM)) {
8082                 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
8083                  * successfully merged by hardware must also have the
8084                  * checksum verified by hardware.  If the user does not
8085                  * want to enable RXCSUM, logically, we should disable GRO_HW.
8086                  */
8087                 if (features & NETIF_F_GRO_HW) {
8088                         netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
8089                         features &= ~NETIF_F_GRO_HW;
8090                 }
8091         }
8092
8093         /* LRO/HW-GRO features cannot be combined with RX-FCS */
8094         if (features & NETIF_F_RXFCS) {
8095                 if (features & NETIF_F_LRO) {
8096                         netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
8097                         features &= ~NETIF_F_LRO;
8098                 }
8099
8100                 if (features & NETIF_F_GRO_HW) {
8101                         netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
8102                         features &= ~NETIF_F_GRO_HW;
8103                 }
8104         }
8105
8106         return features;
8107 }
8108
8109 int __netdev_update_features(struct net_device *dev)
8110 {
8111         struct net_device *upper, *lower;
8112         netdev_features_t features;
8113         struct list_head *iter;
8114         int err = -1;
8115
8116         ASSERT_RTNL();
8117
8118         features = netdev_get_wanted_features(dev);
8119
8120         if (dev->netdev_ops->ndo_fix_features)
8121                 features = dev->netdev_ops->ndo_fix_features(dev, features);
8122
8123         /* driver might be less strict about feature dependencies */
8124         features = netdev_fix_features(dev, features);
8125
8126         /* some features can't be enabled if they're off on an upper device */
8127         netdev_for_each_upper_dev_rcu(dev, upper, iter)
8128                 features = netdev_sync_upper_features(dev, upper, features);
8129
8130         if (dev->features == features)
8131                 goto sync_lower;
8132
8133         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
8134                 &dev->features, &features);
8135
8136         if (dev->netdev_ops->ndo_set_features)
8137                 err = dev->netdev_ops->ndo_set_features(dev, features);
8138         else
8139                 err = 0;
8140
8141         if (unlikely(err < 0)) {
8142                 netdev_err(dev,
8143                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
8144                         err, &features, &dev->features);
8145                 /* return non-0 since some features might have changed and
8146                  * it's better to fire a spurious notification than miss it
8147                  */
8148                 return -1;
8149         }
8150
8151 sync_lower:
8152         /* some features must be disabled on lower devices when disabled
8153          * on an upper device (think: bonding master or bridge)
8154          */
8155         netdev_for_each_lower_dev(dev, lower, iter)
8156                 netdev_sync_lower_features(dev, lower, features);
8157
8158         if (!err) {
8159                 netdev_features_t diff = features ^ dev->features;
8160
8161                 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
8162                         /* udp_tunnel_{get,drop}_rx_info both need
8163                          * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
8164                          * device, or they won't do anything.
8165                          * Thus we need to update dev->features
8166                          * *before* calling udp_tunnel_get_rx_info,
8167                          * but *after* calling udp_tunnel_drop_rx_info.
8168                          */
8169                         if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
8170                                 dev->features = features;
8171                                 udp_tunnel_get_rx_info(dev);
8172                         } else {
8173                                 udp_tunnel_drop_rx_info(dev);
8174                         }
8175                 }
8176
8177                 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
8178                         if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
8179                                 dev->features = features;
8180                                 err |= vlan_get_rx_ctag_filter_info(dev);
8181                         } else {
8182                                 vlan_drop_rx_ctag_filter_info(dev);
8183                         }
8184                 }
8185
8186                 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
8187                         if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
8188                                 dev->features = features;
8189                                 err |= vlan_get_rx_stag_filter_info(dev);
8190                         } else {
8191                                 vlan_drop_rx_stag_filter_info(dev);
8192                         }
8193                 }
8194
8195                 dev->features = features;
8196         }
8197
8198         return err < 0 ? 0 : 1;
8199 }
8200
8201 /**
8202  *      netdev_update_features - recalculate device features
8203  *      @dev: the device to check
8204  *
8205  *      Recalculate dev->features set and send notifications if it
8206  *      has changed. Should be called after driver or hardware dependent
8207  *      conditions might have changed that influence the features.
8208  */
8209 void netdev_update_features(struct net_device *dev)
8210 {
8211         if (__netdev_update_features(dev))
8212                 netdev_features_change(dev);
8213 }
8214 EXPORT_SYMBOL(netdev_update_features);
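
/* Illustrative only (editor's sketch): a hypothetical driver reacting to a
 * hardware or configuration change that affects which offloads it can support
 * would, under rtnl, adjust dev->hw_features (or rely on its .ndo_fix_features
 * callback) and then call
 *
 *	netdev_update_features(dev);
 *
 * so that dev->features is recomputed and a notification is sent only if the
 * resulting feature set actually changed.
 */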
8215
8216 /**
8217  *      netdev_change_features - recalculate device features
8218  *      @dev: the device to check
8219  *
8220  *      Recalculate dev->features set and send notifications even
8221  *      if they have not changed. Should be called instead of
8222  *      netdev_update_features() if also dev->vlan_features might
8223  *      have changed to allow the changes to be propagated to stacked
8224  *      VLAN devices.
8225  */
8226 void netdev_change_features(struct net_device *dev)
8227 {
8228         __netdev_update_features(dev);
8229         netdev_features_change(dev);
8230 }
8231 EXPORT_SYMBOL(netdev_change_features);
8232
8233 /**
8234  *      netif_stacked_transfer_operstate -      transfer operstate
8235  *      @rootdev: the root or lower level device to transfer state from
8236  *      @dev: the device to transfer operstate to
8237  *
8238  *      Transfer operational state from root to device. This is normally
8239  *      called when a stacking relationship exists between the root
8240  *      device and the device (a leaf device).
8241  */
8242 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
8243                                         struct net_device *dev)
8244 {
8245         if (rootdev->operstate == IF_OPER_DORMANT)
8246                 netif_dormant_on(dev);
8247         else
8248                 netif_dormant_off(dev);
8249
8250         if (netif_carrier_ok(rootdev))
8251                 netif_carrier_on(dev);
8252         else
8253                 netif_carrier_off(dev);
8254 }
8255 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
8256
8257 static int netif_alloc_rx_queues(struct net_device *dev)
8258 {
8259         unsigned int i, count = dev->num_rx_queues;
8260         struct netdev_rx_queue *rx;
8261         size_t sz = count * sizeof(*rx);
8262         int err = 0;
8263
8264         BUG_ON(count < 1);
8265
8266         rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8267         if (!rx)
8268                 return -ENOMEM;
8269
8270         dev->_rx = rx;
8271
8272         for (i = 0; i < count; i++) {
8273                 rx[i].dev = dev;
8274
8275                 /* XDP RX-queue setup */
8276                 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8277                 if (err < 0)
8278                         goto err_rxq_info;
8279         }
8280         return 0;
8281
8282 err_rxq_info:
8283         /* Rollback successful reg's and free other resources */
8284         while (i--)
8285                 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
8286         kvfree(dev->_rx);
8287         dev->_rx = NULL;
8288         return err;
8289 }
8290
8291 static void netif_free_rx_queues(struct net_device *dev)
8292 {
8293         unsigned int i, count = dev->num_rx_queues;
8294
8295         /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
8296         if (!dev->_rx)
8297                 return;
8298
8299         for (i = 0; i < count; i++)
8300                 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8301
8302         kvfree(dev->_rx);
8303 }
8304
8305 static void netdev_init_one_queue(struct net_device *dev,
8306                                   struct netdev_queue *queue, void *_unused)
8307 {
8308         /* Initialize queue lock */
8309         spin_lock_init(&queue->_xmit_lock);
8310         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8311         queue->xmit_lock_owner = -1;
8312         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
8313         queue->dev = dev;
8314 #ifdef CONFIG_BQL
8315         dql_init(&queue->dql, HZ);
8316 #endif
8317 }
8318
8319 static void netif_free_tx_queues(struct net_device *dev)
8320 {
8321         kvfree(dev->_tx);
8322 }
8323
8324 static int netif_alloc_netdev_queues(struct net_device *dev)
8325 {
8326         unsigned int count = dev->num_tx_queues;
8327         struct netdev_queue *tx;
8328         size_t sz = count * sizeof(*tx);
8329
8330         if (count < 1 || count > 0xffff)
8331                 return -EINVAL;
8332
8333         tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8334         if (!tx)
8335                 return -ENOMEM;
8336
8337         dev->_tx = tx;
8338
8339         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8340         spin_lock_init(&dev->tx_global_lock);
8341
8342         return 0;
8343 }
8344
8345 void netif_tx_stop_all_queues(struct net_device *dev)
8346 {
8347         unsigned int i;
8348
8349         for (i = 0; i < dev->num_tx_queues; i++) {
8350                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
8351
8352                 netif_tx_stop_queue(txq);
8353         }
8354 }
8355 EXPORT_SYMBOL(netif_tx_stop_all_queues);
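
/* Illustrative only (editor's sketch): a typical driver .ndo_stop or
 * error-reset path stops all transmit queues before shutting the hardware
 * down, e.g.
 *
 *	netif_tx_stop_all_queues(dev);
 *	netif_carrier_off(dev);
 *
 * so the stack stops handing it packets while the device is torn down.
 */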
8356
8357 /**
8358  *      register_netdevice      - register a network device
8359  *      @dev: device to register
8360  *
8361  *      Take a completed network device structure and add it to the kernel
8362  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8363  *      chain. 0 is returned on success. A negative errno code is returned
8364  *      on a failure to set up the device, or if the name is a duplicate.
8365  *
8366  *      Callers must hold the rtnl semaphore. You may want
8367  *      register_netdev() instead of this.
8368  *
8369  *      BUGS:
8370  *      The locking appears insufficient to guarantee two parallel registers
8371  *      will not get the same name.
8372  */
8373
8374 int register_netdevice(struct net_device *dev)
8375 {
8376         int ret;
8377         struct net *net = dev_net(dev);
8378
8379         BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8380                      NETDEV_FEATURE_COUNT);
8381         BUG_ON(dev_boot_phase);
8382         ASSERT_RTNL();
8383
8384         might_sleep();
8385
8386         /* When net_devices are persistent, this will be fatal. */
8387         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
8388         BUG_ON(!net);
8389
8390         spin_lock_init(&dev->addr_list_lock);
8391         netdev_set_addr_lockdep_class(dev);
8392
8393         ret = dev_get_valid_name(net, dev, dev->name);
8394         if (ret < 0)
8395                 goto out;
8396
8397         /* Init, if this function is available */
8398         if (dev->netdev_ops->ndo_init) {
8399                 ret = dev->netdev_ops->ndo_init(dev);
8400                 if (ret) {
8401                         if (ret > 0)
8402                                 ret = -EIO;
8403                         goto out;
8404                 }
8405         }
8406
8407         if (((dev->hw_features | dev->features) &
8408              NETIF_F_HW_VLAN_CTAG_FILTER) &&
8409             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8410              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8411                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8412                 ret = -EINVAL;
8413                 goto err_uninit;
8414         }
8415
8416         ret = -EBUSY;
8417         if (!dev->ifindex)
8418                 dev->ifindex = dev_new_index(net);
8419         else if (__dev_get_by_index(net, dev->ifindex))
8420                 goto err_uninit;
8421
8422         /* Transfer changeable features to wanted_features and enable
8423          * software offloads (GSO and GRO).
8424          */
8425         dev->hw_features |= NETIF_F_SOFT_FEATURES;
8426         dev->features |= NETIF_F_SOFT_FEATURES;
8427
8428         if (dev->netdev_ops->ndo_udp_tunnel_add) {
8429                 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8430                 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8431         }
8432
8433         dev->wanted_features = dev->features & dev->hw_features;
8434
8435         if (!(dev->flags & IFF_LOOPBACK))
8436                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
8437
8438         /* If IPv4 TCP segmentation offload is supported we should also
8439          * allow the device to enable segmenting the frame with the option
8440          * of ignoring a static IP ID value.  This doesn't enable the
8441          * feature itself but allows the user to enable it later.
8442          */
8443         if (dev->hw_features & NETIF_F_TSO)
8444                 dev->hw_features |= NETIF_F_TSO_MANGLEID;
8445         if (dev->vlan_features & NETIF_F_TSO)
8446                 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8447         if (dev->mpls_features & NETIF_F_TSO)
8448                 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8449         if (dev->hw_enc_features & NETIF_F_TSO)
8450                 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
8451
8452         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
8453          */
8454         dev->vlan_features |= NETIF_F_HIGHDMA;
8455
8456         /* Make NETIF_F_SG inheritable to tunnel devices.
8457          */
8458         dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
8459
8460         /* Make NETIF_F_SG inheritable to MPLS.
8461          */
8462         dev->mpls_features |= NETIF_F_SG;
8463
8464         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8465         ret = notifier_to_errno(ret);
8466         if (ret)
8467                 goto err_uninit;
8468
8469         ret = netdev_register_kobject(dev);
8470         if (ret)
8471                 goto err_uninit;
8472         dev->reg_state = NETREG_REGISTERED;
8473
8474         __netdev_update_features(dev);
8475
8476         /*
8477          *      Default initial state at registry is that the
8478          *      device is present.
8479          */
8480
8481         set_bit(__LINK_STATE_PRESENT, &dev->state);
8482
8483         linkwatch_init_dev(dev);
8484
8485         dev_init_scheduler(dev);
8486         dev_hold(dev);
8487         list_netdevice(dev);
8488         add_device_randomness(dev->dev_addr, dev->addr_len);
8489
8490         /* If the device has permanent device address, driver should
8491          * set dev_addr and also addr_assign_type should be set to
8492          * NET_ADDR_PERM (default value).
8493          */
8494         if (dev->addr_assign_type == NET_ADDR_PERM)
8495                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8496
8497         /* Notify protocols that a new device appeared. */
8498         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
8499         ret = notifier_to_errno(ret);
8500         if (ret) {
8501                 rollback_registered(dev);
8502                 dev->reg_state = NETREG_UNREGISTERED;
8503         }
8504         /*
8505          *      Prevent userspace races by waiting until the network
8506          *      device is fully setup before sending notifications.
8507          */
8508         if (!dev->rtnl_link_ops ||
8509             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8510                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8511
8512 out:
8513         return ret;
8514
8515 err_uninit:
8516         if (dev->netdev_ops->ndo_uninit)
8517                 dev->netdev_ops->ndo_uninit(dev);
8518         if (dev->priv_destructor)
8519                 dev->priv_destructor(dev);
8520         goto out;
8521 }
8522 EXPORT_SYMBOL(register_netdevice);
8523
8524 /**
8525  *      init_dummy_netdev       - init a dummy network device for NAPI
8526  *      @dev: device to init
8527  *
8528  *      This takes a network device structure and initializes the minimum
8529  *      number of fields so it can be used to schedule NAPI polls without
8530  *      registering a full-blown interface. This is to be used by drivers
8531  *      that need to tie several hardware interfaces to a single NAPI
8532  *      poll scheduler due to HW limitations.
8533  */
8534 int init_dummy_netdev(struct net_device *dev)
8535 {
8536         /* Clear everything. Note we don't initialize spinlocks
8537          * as they aren't supposed to be taken by any of the
8538          * NAPI code and this dummy netdev is supposed to be
8539          * only ever used for NAPI polls
8540          */
8541         memset(dev, 0, sizeof(struct net_device));
8542
8543         /* make sure we BUG if trying to hit standard
8544          * register/unregister code path
8545          */
8546         dev->reg_state = NETREG_DUMMY;
8547
8548         /* NAPI wants this */
8549         INIT_LIST_HEAD(&dev->napi_list);
8550
8551         /* a dummy interface is started by default */
8552         set_bit(__LINK_STATE_PRESENT, &dev->state);
8553         set_bit(__LINK_STATE_START, &dev->state);
8554
8555         /* Note: We don't allocate pcpu_refcnt for dummy devices,
8556          * because users of this 'device' don't need to change
8557          * its refcount.
8558          */
8559
8560         return 0;
8561 }
8562 EXPORT_SYMBOL_GPL(init_dummy_netdev);
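
/* Illustrative only (editor's sketch; "priv" and "my_poll" are hypothetical
 * driver-private names): a driver with one NAPI context shared by several
 * hardware interfaces can embed a dummy netdev in its private state, roughly:
 *
 *	init_dummy_netdev(&priv->napi_dev);
 *	netif_napi_add(&priv->napi_dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 *
 * The dummy device is never registered and must not be passed to
 * register_netdev()/unregister_netdev().
 */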
8563
8564
8565 /**
8566  *      register_netdev - register a network device
8567  *      @dev: device to register
8568  *
8569  *      Take a completed network device structure and add it to the kernel
8570  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8571  *      chain. 0 is returned on success. A negative errno code is returned
8572  *      on a failure to set up the device, or if the name is a duplicate.
8573  *
8574  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
8575  *      and expands the device name if you passed a format string to
8576  *      alloc_netdev.
8577  */
8578 int register_netdev(struct net_device *dev)
8579 {
8580         int err;
8581
8582         if (rtnl_lock_killable())
8583                 return -EINTR;
8584         err = register_netdevice(dev);
8585         rtnl_unlock();
8586         return err;
8587 }
8588 EXPORT_SYMBOL(register_netdev);
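
/* Illustrative only (editor's sketch; "my_priv" and "my_netdev_ops" are
 * hypothetical): the usual probe/remove pairing in an Ethernet driver looks
 * roughly like
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * with unregister_netdev() followed by free_netdev() on removal.
 */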
8589
8590 int netdev_refcnt_read(const struct net_device *dev)
8591 {
8592         int i, refcnt = 0;
8593
8594         for_each_possible_cpu(i)
8595                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8596         return refcnt;
8597 }
8598 EXPORT_SYMBOL(netdev_refcnt_read);
8599
8600 /**
8601  * netdev_wait_allrefs - wait until all references are gone.
8602  * @dev: target net_device
8603  *
8604  * This is called when unregistering network devices.
8605  *
8606  * Any protocol or device that holds a reference should register
8607  * for netdevice notification, and cleanup and put back the
8608  * reference if they receive an UNREGISTER event.
8609  * We can get stuck here if buggy protocols don't correctly
8610  * call dev_put.
8611  */
8612 static void netdev_wait_allrefs(struct net_device *dev)
8613 {
8614         unsigned long rebroadcast_time, warning_time;
8615         int refcnt;
8616
8617         linkwatch_forget_dev(dev);
8618
8619         rebroadcast_time = warning_time = jiffies;
8620         refcnt = netdev_refcnt_read(dev);
8621
8622         while (refcnt != 0) {
8623                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
8624                         rtnl_lock();
8625
8626                         /* Rebroadcast unregister notification */
8627                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8628
8629                         __rtnl_unlock();
8630                         rcu_barrier();
8631                         rtnl_lock();
8632
8633                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8634                                      &dev->state)) {
8635                                 /* We must not have linkwatch events
8636                                  * pending on unregister. If this
8637                                  * happens, we simply run the queue
8638                                  * unscheduled, resulting in a noop
8639                                  * for this device.
8640                                  */
8641                                 linkwatch_run_queue();
8642                         }
8643
8644                         __rtnl_unlock();
8645
8646                         rebroadcast_time = jiffies;
8647                 }
8648
8649                 msleep(250);
8650
8651                 refcnt = netdev_refcnt_read(dev);
8652
8653                 if (time_after(jiffies, warning_time + 10 * HZ)) {
8654                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8655                                  dev->name, refcnt);
8656                         warning_time = jiffies;
8657                 }
8658         }
8659 }
8660
8661 /* The sequence is:
8662  *
8663  *      rtnl_lock();
8664  *      ...
8665  *      register_netdevice(x1);
8666  *      register_netdevice(x2);
8667  *      ...
8668  *      unregister_netdevice(y1);
8669  *      unregister_netdevice(y2);
8670  *      ...
8671  *      rtnl_unlock();
8672  *      free_netdev(y1);
8673  *      free_netdev(y2);
8674  *
8675  * We are invoked by rtnl_unlock().
8676  * This allows us to deal with problems:
8677  * 1) We can delete sysfs objects which invoke hotplug
8678  *    without deadlocking with linkwatch via keventd.
8679  * 2) Since we run with the RTNL semaphore not held, we can sleep
8680  *    safely in order to wait for the netdev refcnt to drop to zero.
8681  *
8682  * We must not return until all unregister events added during
8683  * the interval the lock was held have been completed.
8684  */
8685 void netdev_run_todo(void)
8686 {
8687         struct list_head list;
8688
8689         /* Snapshot list, allow later requests */
8690         list_replace_init(&net_todo_list, &list);
8691
8692         __rtnl_unlock();
8693
8694
8695         /* Wait for rcu callbacks to finish before next phase */
8696         if (!list_empty(&list))
8697                 rcu_barrier();
8698
8699         while (!list_empty(&list)) {
8700                 struct net_device *dev
8701                         = list_first_entry(&list, struct net_device, todo_list);
8702                 list_del(&dev->todo_list);
8703
8704                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
8705                         pr_err("network todo '%s' but state %d\n",
8706                                dev->name, dev->reg_state);
8707                         dump_stack();
8708                         continue;
8709                 }
8710
8711                 dev->reg_state = NETREG_UNREGISTERED;
8712
8713                 netdev_wait_allrefs(dev);
8714
8715                 /* paranoia */
8716                 BUG_ON(netdev_refcnt_read(dev));
8717                 BUG_ON(!list_empty(&dev->ptype_all));
8718                 BUG_ON(!list_empty(&dev->ptype_specific));
8719                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8720                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
8721 #if IS_ENABLED(CONFIG_DECNET)
8722                 WARN_ON(dev->dn_ptr);
8723 #endif
8724                 if (dev->priv_destructor)
8725                         dev->priv_destructor(dev);
8726                 if (dev->needs_free_netdev)
8727                         free_netdev(dev);
8728
8729                 /* Report a network device has been unregistered */
8730                 rtnl_lock();
8731                 dev_net(dev)->dev_unreg_count--;
8732                 __rtnl_unlock();
8733                 wake_up(&netdev_unregistering_wq);
8734
8735                 /* Free network device */
8736                 kobject_put(&dev->dev.kobj);
8737         }
8738 }
8739
8740 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
8741  * all the same fields in the same order as net_device_stats, with only
8742  * the type differing, but rtnl_link_stats64 may have additional fields
8743  * at the end for newer counters.
8744  */
8745 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
8746                              const struct net_device_stats *netdev_stats)
8747 {
8748 #if BITS_PER_LONG == 64
8749         BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
8750         memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
8751         /* zero out counters that only exist in rtnl_link_stats64 */
8752         memset((char *)stats64 + sizeof(*netdev_stats), 0,
8753                sizeof(*stats64) - sizeof(*netdev_stats));
8754 #else
8755         size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
8756         const unsigned long *src = (const unsigned long *)netdev_stats;
8757         u64 *dst = (u64 *)stats64;
8758
8759         BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
8760         for (i = 0; i < n; i++)
8761                 dst[i] = src[i];
8762         /* zero out counters that only exist in rtnl_link_stats64 */
8763         memset((char *)stats64 + n * sizeof(u64), 0,
8764                sizeof(*stats64) - n * sizeof(u64));
8765 #endif
8766 }
8767 EXPORT_SYMBOL(netdev_stats_to_stats64);
8768
8769 /**
8770  *      dev_get_stats   - get network device statistics
8771  *      @dev: device to get statistics from
8772  *      @storage: place to store stats
8773  *
8774  *      Get network statistics from device. Return @storage.
8775  *      The device driver may provide its own method by setting
8776  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8777  *      otherwise the internal statistics structure is used.
8778  */
8779 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8780                                         struct rtnl_link_stats64 *storage)
8781 {
8782         const struct net_device_ops *ops = dev->netdev_ops;
8783
8784         if (ops->ndo_get_stats64) {
8785                 memset(storage, 0, sizeof(*storage));
8786                 ops->ndo_get_stats64(dev, storage);
8787         } else if (ops->ndo_get_stats) {
8788                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
8789         } else {
8790                 netdev_stats_to_stats64(storage, &dev->stats);
8791         }
8792         storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8793         storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8794         storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
8795         return storage;
8796 }
8797 EXPORT_SYMBOL(dev_get_stats);
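
/* Illustrative only (editor's sketch): in-tree callers such as the rtnetlink
 * and procfs statistics code use a caller-supplied buffer and read the result
 * from @storage:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_debug("%s: %llu rx packets\n", dev->name, stats.rx_packets);
 */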
8798
8799 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
8800 {
8801         struct netdev_queue *queue = dev_ingress_queue(dev);
8802
8803 #ifdef CONFIG_NET_CLS_ACT
8804         if (queue)
8805                 return queue;
8806         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8807         if (!queue)
8808                 return NULL;
8809         netdev_init_one_queue(dev, queue, NULL);
8810         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
8811         queue->qdisc_sleeping = &noop_qdisc;
8812         rcu_assign_pointer(dev->ingress_queue, queue);
8813 #endif
8814         return queue;
8815 }
8816
8817 static const struct ethtool_ops default_ethtool_ops;
8818
8819 void netdev_set_default_ethtool_ops(struct net_device *dev,
8820                                     const struct ethtool_ops *ops)
8821 {
8822         if (dev->ethtool_ops == &default_ethtool_ops)
8823                 dev->ethtool_ops = ops;
8824 }
8825 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8826
8827 void netdev_freemem(struct net_device *dev)
8828 {
8829         char *addr = (char *)dev - dev->padded;
8830
8831         kvfree(addr);
8832 }
8833
8834 /**
8835  * alloc_netdev_mqs - allocate network device
8836  * @sizeof_priv: size of private data to allocate space for
8837  * @name: device name format string
8838  * @name_assign_type: origin of device name
8839  * @setup: callback to initialize device
8840  * @txqs: the number of TX subqueues to allocate
8841  * @rxqs: the number of RX subqueues to allocate
8842  *
8843  * Allocates a struct net_device with private data area for driver use
8844  * and performs basic initialization.  Also allocates subqueue structs
8845  * for each queue on the device.
8846  */
8847 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
8848                 unsigned char name_assign_type,
8849                 void (*setup)(struct net_device *),
8850                 unsigned int txqs, unsigned int rxqs)
8851 {
8852         struct net_device *dev;
8853         unsigned int alloc_size;
8854         struct net_device *p;
8855
8856         BUG_ON(strlen(name) >= sizeof(dev->name));
8857
8858         if (txqs < 1) {
8859                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
8860                 return NULL;
8861         }
8862
8863         if (rxqs < 1) {
8864                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
8865                 return NULL;
8866         }
8867
8868         alloc_size = sizeof(struct net_device);
8869         if (sizeof_priv) {
8870                 /* ensure 32-byte alignment of private area */
8871                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
8872                 alloc_size += sizeof_priv;
8873         }
8874         /* ensure 32-byte alignment of whole construct */
8875         alloc_size += NETDEV_ALIGN - 1;
8876
8877         p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8878         if (!p)
8879                 return NULL;
8880
8881         dev = PTR_ALIGN(p, NETDEV_ALIGN);
8882         dev->padded = (char *)dev - (char *)p;
8883
8884         dev->pcpu_refcnt = alloc_percpu(int);
8885         if (!dev->pcpu_refcnt)
8886                 goto free_dev;
8887
8888         if (dev_addr_init(dev))
8889                 goto free_pcpu;
8890
8891         dev_mc_init(dev);
8892         dev_uc_init(dev);
8893
8894         dev_net_set(dev, &init_net);
8895
8896         dev->gso_max_size = GSO_MAX_SIZE;
8897         dev->gso_max_segs = GSO_MAX_SEGS;
8898
8899         INIT_LIST_HEAD(&dev->napi_list);
8900         INIT_LIST_HEAD(&dev->unreg_list);
8901         INIT_LIST_HEAD(&dev->close_list);
8902         INIT_LIST_HEAD(&dev->link_watch_list);
8903         INIT_LIST_HEAD(&dev->adj_list.upper);
8904         INIT_LIST_HEAD(&dev->adj_list.lower);
8905         INIT_LIST_HEAD(&dev->ptype_all);
8906         INIT_LIST_HEAD(&dev->ptype_specific);
8907 #ifdef CONFIG_NET_SCHED
8908         hash_init(dev->qdisc_hash);
8909 #endif
8910         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8911         setup(dev);
8912
8913         if (!dev->tx_queue_len) {
8914                 dev->priv_flags |= IFF_NO_QUEUE;
8915                 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
8916         }
8917
8918         dev->num_tx_queues = txqs;
8919         dev->real_num_tx_queues = txqs;
8920         if (netif_alloc_netdev_queues(dev))
8921                 goto free_all;
8922
8923         dev->num_rx_queues = rxqs;
8924         dev->real_num_rx_queues = rxqs;
8925         if (netif_alloc_rx_queues(dev))
8926                 goto free_all;
8927
8928         strcpy(dev->name, name);
8929         dev->name_assign_type = name_assign_type;
8930         dev->group = INIT_NETDEV_GROUP;
8931         if (!dev->ethtool_ops)
8932                 dev->ethtool_ops = &default_ethtool_ops;
8933
8934         nf_hook_ingress_init(dev);
8935
8936         return dev;
8937
8938 free_all:
8939         free_netdev(dev);
8940         return NULL;
8941
8942 free_pcpu:
8943         free_percpu(dev->pcpu_refcnt);
8944 free_dev:
8945         netdev_freemem(dev);
8946         return NULL;
8947 }
8948 EXPORT_SYMBOL(alloc_netdev_mqs);
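
/* Illustrative only (editor's sketch; "my_priv" is hypothetical): most
 * drivers reach this through a wrapper such as alloc_etherdev_mqs(), which
 * supplies ether_setup() and the name format string:
 *
 *	dev = alloc_etherdev_mqs(sizeof(struct my_priv), num_tx, num_rx);
 *
 * Every successful allocation must eventually be released with free_netdev(),
 * whether or not the device was ever registered.
 */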
8949
8950 /**
8951  * free_netdev - free network device
8952  * @dev: device
8953  *
8954  * This function does the last stage of destroying an allocated device
8955  * interface. The reference to the device object is released. If this
8956  * is the last reference then it will be freed. Must be called in process
8957  * context.
8958  */
8959 void free_netdev(struct net_device *dev)
8960 {
8961         struct napi_struct *p, *n;
8962
8963         might_sleep();
8964         netif_free_tx_queues(dev);
8965         netif_free_rx_queues(dev);
8966
8967         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
8968
8969         /* Flush device addresses */
8970         dev_addr_flush(dev);
8971
8972         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8973                 netif_napi_del(p);
8974
8975         free_percpu(dev->pcpu_refcnt);
8976         dev->pcpu_refcnt = NULL;
8977
8978         /*  Compatibility with error handling in drivers */
8979         if (dev->reg_state == NETREG_UNINITIALIZED) {
8980                 netdev_freemem(dev);
8981                 return;
8982         }
8983
8984         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8985         dev->reg_state = NETREG_RELEASED;
8986
8987         /* will free via device release */
8988         put_device(&dev->dev);
8989 }
8990 EXPORT_SYMBOL(free_netdev);
8991
8992 /**
8993  *      synchronize_net -  Synchronize with packet receive processing
8994  *
8995  *      Wait for packets currently being received to be done.
8996  *      Does not block later packets from starting.
8997  */
8998 void synchronize_net(void)
8999 {
9000         might_sleep();
9001         if (rtnl_is_locked())
9002                 synchronize_rcu_expedited();
9003         else
9004                 synchronize_rcu();
9005 }
9006 EXPORT_SYMBOL(synchronize_net);
9007
9008 /**
9009  *      unregister_netdevice_queue - remove device from the kernel
9010  *      @dev: device
9011  *      @head: list
9012  *
9013  *      This function shuts down a device interface and removes it
9014  *      from the kernel tables.
9015  *      If head not NULL, device is queued to be unregistered later.
9016  *
9017  *      Callers must hold the rtnl semaphore.  You may want
9018  *      unregister_netdev() instead of this.
9019  */
9020
9021 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
9022 {
9023         ASSERT_RTNL();
9024
9025         if (head) {
9026                 list_move_tail(&dev->unreg_list, head);
9027         } else {
9028                 rollback_registered(dev);
9029                 /* Finish processing unregister after unlock */
9030                 net_set_todo(dev);
9031         }
9032 }
9033 EXPORT_SYMBOL(unregister_netdevice_queue);
9034
9035 /**
9036  *      unregister_netdevice_many - unregister many devices
9037  *      @head: list of devices
9038  *
9039  *  Note: As most callers use a stack allocated list_head,
9040  *  we force a list_del() to make sure the stack won't be corrupted later.
9041  */
9042 void unregister_netdevice_many(struct list_head *head)
9043 {
9044         struct net_device *dev;
9045
9046         if (!list_empty(head)) {
9047                 rollback_registered_many(head);
9048                 list_for_each_entry(dev, head, unreg_list)
9049                         net_set_todo(dev);
9050                 list_del(head);
9051         }
9052 }
9053 EXPORT_SYMBOL(unregister_netdevice_many);
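
/* Illustrative only (editor's sketch; dev1/dev2 are placeholders): batch
 * teardown, as rtnl_link_ops ->dellink handlers do, queues devices on a stack
 * list and unregisters them in one pass, all under the rtnl semaphore:
 *
 *	LIST_HEAD(list);
 *
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 */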
9054
9055 /**
9056  *      unregister_netdev - remove device from the kernel
9057  *      @dev: device
9058  *
9059  *      This function shuts down a device interface and removes it
9060  *      from the kernel tables.
9061  *
9062  *      This is just a wrapper for unregister_netdevice that takes
9063  *      the rtnl semaphore.  In general you want to use this and not
9064  *      unregister_netdevice.
9065  */
9066 void unregister_netdev(struct net_device *dev)
9067 {
9068         rtnl_lock();
9069         unregister_netdevice(dev);
9070         rtnl_unlock();
9071 }
9072 EXPORT_SYMBOL(unregister_netdev);
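
/* Illustrative only (editor's sketch): a driver's remove path is typically
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * unregister_netdev() takes rtnl itself, so it must not be called with the
 * rtnl semaphore already held; use unregister_netdevice() in that case.
 */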
9073
9074 /**
9075  *      dev_change_net_namespace - move device to a different network namespace
9076  *      @dev: device
9077  *      @net: network namespace
9078  *      @pat: If not NULL name pattern to try if the current device name
9079  *            is already taken in the destination network namespace.
9080  *
9081  *      This function shuts down a device interface and moves it
9082  *      to a new network namespace. On success 0 is returned, on
9083  *      a failure a netagive errno code is returned.
9084  *
9085  *      Callers must hold the rtnl semaphore.
9086  */
9087
9088 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
9089 {
9090         int err, new_nsid, new_ifindex;
9091
9092         ASSERT_RTNL();
9093
9094         /* Don't allow namespace local devices to be moved. */
9095         err = -EINVAL;
9096         if (dev->features & NETIF_F_NETNS_LOCAL)
9097                 goto out;
9098
9099         /* Ensure the device has been registered */
9100         if (dev->reg_state != NETREG_REGISTERED)
9101                 goto out;
9102
9103         /* Get out if there is nothing to do */
9104         err = 0;
9105         if (net_eq(dev_net(dev), net))
9106                 goto out;
9107
9108         /* Pick the destination device name, and ensure
9109          * we can use it in the destination network namespace.
9110          */
9111         err = -EEXIST;
9112         if (__dev_get_by_name(net, dev->name)) {
9113                 /* We get here if we can't use the current device name */
9114                 if (!pat)
9115                         goto out;
9116                 err = dev_get_valid_name(net, dev, pat);
9117                 if (err < 0)
9118                         goto out;
9119         }
9120
9121         /*
9122          * And now a mini version of register_netdevice and unregister_netdevice.
9123          */
9124
9125         /* If device is running close it first. */
9126         dev_close(dev);
9127
9128         /* And unlink it from device chain */
9129         unlist_netdevice(dev);
9130
9131         synchronize_net();
9132
9133         /* Shut down the queueing discipline. */
9134         dev_shutdown(dev);
9135
9136         /* Notify protocols that we are about to destroy
9137          * this device. They should clean up all of their state.
9138          *
9139          * Note that dev->reg_state stays at NETREG_REGISTERED.
9140          * This is wanted because this way 8021q and macvlan know
9141          * the device is just moving and can keep their slaves up.
9142          */
9143         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9144         rcu_barrier();
9145
9146         new_nsid = peernet2id_alloc(dev_net(dev), net);
9147         /* If there is an ifindex conflict, assign a new one */
9148         if (__dev_get_by_index(net, dev->ifindex))
9149                 new_ifindex = dev_new_index(net);
9150         else
9151                 new_ifindex = dev->ifindex;
9152
9153         rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
9154                             new_ifindex);
9155
9156         /*
9157          *      Flush the unicast and multicast chains
9158          */
9159         dev_uc_flush(dev);
9160         dev_mc_flush(dev);
9161
9162         /* Send a netdev-removed uevent to the old namespace */
9163         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
9164         netdev_adjacent_del_links(dev);
9165
9166         /* Actually switch the network namespace */
9167         dev_net_set(dev, net);
9168         dev->ifindex = new_ifindex;
9169
9170         /* Send a netdev-add uevent to the new namespace */
9171         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
9172         netdev_adjacent_add_links(dev);
9173
9174         /* Fixup kobjects */
9175         err = device_rename(&dev->dev, dev->name);
9176         WARN_ON(err);
9177
9178         /* Add the device back in the hashes */
9179         list_netdevice(dev);
9180
9181         /* Notify protocols that a new device appeared. */
9182         call_netdevice_notifiers(NETDEV_REGISTER, dev);
9183
9184         /*
9185          *      Prevent userspace races by waiting until the network
9186          *      device is fully set up before sending notifications.
9187          */
9188         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9189
9190         synchronize_net();
9191         err = 0;
9192 out:
9193         return err;
9194 }
9195 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
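/* Example (illustrative sketch; dev and target_net are hypothetical): a
 * caller moves a device while holding the rtnl semaphore and may pass a
 * fallback name pattern for the case where the current name is taken:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, target_net, "eth%d");
 *	rtnl_unlock();
 *
 * A NULL pattern makes a name clash fail with -EEXIST instead of renaming.
 */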
9196
9197 static int dev_cpu_dead(unsigned int oldcpu)
9198 {
9199         struct sk_buff **list_skb;
9200         struct sk_buff *skb;
9201         unsigned int cpu;
9202         struct softnet_data *sd, *oldsd, *remsd = NULL;
9203
9204         local_irq_disable();
9205         cpu = smp_processor_id();
9206         sd = &per_cpu(softnet_data, cpu);
9207         oldsd = &per_cpu(softnet_data, oldcpu);
9208
9209         /* Find end of our completion_queue. */
9210         list_skb = &sd->completion_queue;
9211         while (*list_skb)
9212                 list_skb = &(*list_skb)->next;
9213         /* Append completion queue from offline CPU. */
9214         *list_skb = oldsd->completion_queue;
9215         oldsd->completion_queue = NULL;
9216
9217         /* Append output queue from offline CPU. */
9218         if (oldsd->output_queue) {
9219                 *sd->output_queue_tailp = oldsd->output_queue;
9220                 sd->output_queue_tailp = oldsd->output_queue_tailp;
9221                 oldsd->output_queue = NULL;
9222                 oldsd->output_queue_tailp = &oldsd->output_queue;
9223         }
9224         /* Append NAPI poll list from offline CPU, with one exception:
9225          * process_backlog() must be called by the CPU owning the per-cpu backlog.
9226          * We properly handle process_queue & input_pkt_queue later.
9227          */
9228         while (!list_empty(&oldsd->poll_list)) {
9229                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9230                                                             struct napi_struct,
9231                                                             poll_list);
9232
9233                 list_del_init(&napi->poll_list);
9234                 if (napi->poll == process_backlog)
9235                         napi->state = 0;
9236                 else
9237                         ____napi_schedule(sd, napi);
9238         }
9239
9240         raise_softirq_irqoff(NET_TX_SOFTIRQ);
9241         local_irq_enable();
9242
9243 #ifdef CONFIG_RPS
9244         remsd = oldsd->rps_ipi_list;
9245         oldsd->rps_ipi_list = NULL;
9246 #endif
9247         /* Send out pending IPIs on the offline CPU */
9248         net_rps_send_ipi(remsd);
9249
9250         /* Process offline CPU's input_pkt_queue */
9251         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
9252                 netif_rx_ni(skb);
9253                 input_queue_head_incr(oldsd);
9254         }
9255         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
9256                 netif_rx_ni(skb);
9257                 input_queue_head_incr(oldsd);
9258         }
9259
9260         return 0;
9261 }
9262
9263 /**
9264  *      netdev_increment_features - increment feature set by one
9265  *      @all: current feature set
9266  *      @one: new feature set
9267  *      @mask: mask feature set
9268  *
9269  *      Computes a new feature set after adding a device with feature set
9270  *      @one to the master device with current feature set @all.  Will not
9271  *      enable anything that is off in @mask. Returns the new feature set.
9272  */
9273 netdev_features_t netdev_increment_features(netdev_features_t all,
9274         netdev_features_t one, netdev_features_t mask)
9275 {
9276         if (mask & NETIF_F_HW_CSUM)
9277                 mask |= NETIF_F_CSUM_MASK;
9278         mask |= NETIF_F_VLAN_CHALLENGED;
9279
9280         all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
9281         all &= one | ~NETIF_F_ALL_FOR_ALL;
9282
9283         /* If one device supports hw checksumming, set for all. */
9284         if (all & NETIF_F_HW_CSUM)
9285                 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
9286
9287         return all;
9288 }
9289 EXPORT_SYMBOL(netdev_increment_features);
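/* Example (illustrative sketch; slave1_features, slave2_features and mask
 * are hypothetical): a master device such as a bond or bridge can fold each
 * slave's feature set into its own, one slave at a time, starting from the
 * permitted mask:
 *
 *	netdev_features_t all = mask;
 *
 *	all = netdev_increment_features(all, slave1_features, mask);
 *	all = netdev_increment_features(all, slave2_features, mask);
 */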
9290
9291 static struct hlist_head * __net_init netdev_create_hash(void)
9292 {
9293         int i;
9294         struct hlist_head *hash;
9295
9296         hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
9297         if (hash != NULL)
9298                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9299                         INIT_HLIST_HEAD(&hash[i]);
9300
9301         return hash;
9302 }
9303
9304 /* Initialize per network namespace state */
9305 static int __net_init netdev_init(struct net *net)
9306 {
9307         BUILD_BUG_ON(GRO_HASH_BUCKETS >
9308                      8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
9309
9310         if (net != &init_net)
9311                 INIT_LIST_HEAD(&net->dev_base_head);
9312
9313         net->dev_name_head = netdev_create_hash();
9314         if (net->dev_name_head == NULL)
9315                 goto err_name;
9316
9317         net->dev_index_head = netdev_create_hash();
9318         if (net->dev_index_head == NULL)
9319                 goto err_idx;
9320
9321         return 0;
9322
9323 err_idx:
9324         kfree(net->dev_name_head);
9325 err_name:
9326         return -ENOMEM;
9327 }
9328
9329 /**
9330  *      netdev_drivername - network driver for the device
9331  *      @dev: network device
9332  *
9333  *      Determine network driver for device.
9334  */
9335 const char *netdev_drivername(const struct net_device *dev)
9336 {
9337         const struct device_driver *driver;
9338         const struct device *parent;
9339         const char *empty = "";
9340
9341         parent = dev->dev.parent;
9342         if (!parent)
9343                 return empty;
9344
9345         driver = parent->driver;
9346         if (driver && driver->name)
9347                 return driver->name;
9348         return empty;
9349 }
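/* Example (illustrative sketch): diagnostics can use the returned string,
 * which is "" when no parent driver is bound, directly in a message:
 *
 *	netdev_warn(dev, "transmit timed out (driver %s)\n",
 *		    netdev_drivername(dev));
 */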
9350
9351 static void __netdev_printk(const char *level, const struct net_device *dev,
9352                             struct va_format *vaf)
9353 {
9354         if (dev && dev->dev.parent) {
9355                 dev_printk_emit(level[1] - '0',
9356                                 dev->dev.parent,
9357                                 "%s %s %s%s: %pV",
9358                                 dev_driver_string(dev->dev.parent),
9359                                 dev_name(dev->dev.parent),
9360                                 netdev_name(dev), netdev_reg_state(dev),
9361                                 vaf);
9362         } else if (dev) {
9363                 printk("%s%s%s: %pV",
9364                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
9365         } else {
9366                 printk("%s(NULL net_device): %pV", level, vaf);
9367         }
9368 }
9369
9370 void netdev_printk(const char *level, const struct net_device *dev,
9371                    const char *format, ...)
9372 {
9373         struct va_format vaf;
9374         va_list args;
9375
9376         va_start(args, format);
9377
9378         vaf.fmt = format;
9379         vaf.va = &args;
9380
9381         __netdev_printk(level, dev, &vaf);
9382
9383         va_end(args);
9384 }
9385 EXPORT_SYMBOL(netdev_printk);
9386
9387 #define define_netdev_printk_level(func, level)                 \
9388 void func(const struct net_device *dev, const char *fmt, ...)   \
9389 {                                                               \
9390         struct va_format vaf;                                   \
9391         va_list args;                                           \
9392                                                                 \
9393         va_start(args, fmt);                                    \
9394                                                                 \
9395         vaf.fmt = fmt;                                          \
9396         vaf.va = &args;                                         \
9397                                                                 \
9398         __netdev_printk(level, dev, &vaf);                      \
9399                                                                 \
9400         va_end(args);                                           \
9401 }                                                               \
9402 EXPORT_SYMBOL(func);
9403
9404 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9405 define_netdev_printk_level(netdev_alert, KERN_ALERT);
9406 define_netdev_printk_level(netdev_crit, KERN_CRIT);
9407 define_netdev_printk_level(netdev_err, KERN_ERR);
9408 define_netdev_printk_level(netdev_warn, KERN_WARNING);
9409 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9410 define_netdev_printk_level(netdev_info, KERN_INFO);
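/* Example (illustrative sketch; qid, err and speed are hypothetical locals):
 * these helpers are used like dev_err()/dev_info(), with the net_device
 * supplying the "driver device ifname:" message prefix:
 *
 *	netdev_err(dev, "failed to start queue %d: %d\n", qid, err);
 *	netdev_info(dev, "link up, %u Mbps\n", speed);
 */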
9411
9412 static void __net_exit netdev_exit(struct net *net)
9413 {
9414         kfree(net->dev_name_head);
9415         kfree(net->dev_index_head);
9416         if (net != &init_net)
9417                 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
9418 }
9419
9420 static struct pernet_operations __net_initdata netdev_net_ops = {
9421         .init = netdev_init,
9422         .exit = netdev_exit,
9423 };
9424
9425 static void __net_exit default_device_exit(struct net *net)
9426 {
9427         struct net_device *dev, *aux;
9428         /*
9429          * Push all migratable network devices back to the
9430          * initial network namespace
9431          */
9432         rtnl_lock();
9433         for_each_netdev_safe(net, dev, aux) {
9434                 int err;
9435                 char fb_name[IFNAMSIZ];
9436
9437                 /* Ignore unmovable devices (e.g. loopback) */
9438                 if (dev->features & NETIF_F_NETNS_LOCAL)
9439                         continue;
9440
9441                 /* Leave virtual devices for the generic cleanup */
9442                 if (dev->rtnl_link_ops)
9443                         continue;
9444
9445                 /* Push remaining network devices to init_net */
9446                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9447                 err = dev_change_net_namespace(dev, &init_net, fb_name);
9448                 if (err) {
9449                         pr_emerg("%s: failed to move %s to init_net: %d\n",
9450                                  __func__, dev->name, err);
9451                         BUG();
9452                 }
9453         }
9454         rtnl_unlock();
9455 }
9456
9457 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9458 {
9459         /* Return with the rtnl_lock held when there are no network
9460          * devices unregistering in any network namespace in net_list.
9461          */
9462         struct net *net;
9463         bool unregistering;
9464         DEFINE_WAIT_FUNC(wait, woken_wake_function);
9465
9466         add_wait_queue(&netdev_unregistering_wq, &wait);
9467         for (;;) {
9468                 unregistering = false;
9469                 rtnl_lock();
9470                 list_for_each_entry(net, net_list, exit_list) {
9471                         if (net->dev_unreg_count > 0) {
9472                                 unregistering = true;
9473                                 break;
9474                         }
9475                 }
9476                 if (!unregistering)
9477                         break;
9478                 __rtnl_unlock();
9479
9480                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
9481         }
9482         remove_wait_queue(&netdev_unregistering_wq, &wait);
9483 }
9484
9485 static void __net_exit default_device_exit_batch(struct list_head *net_list)
9486 {
9487         /* At exit, all network devices must be removed from a network
9488          * namespace.  Do this in the reverse order of registration.
9489          * Do this across as many network namespaces as possible to
9490          * improve batching efficiency.
9491          */
9492         struct net_device *dev;
9493         struct net *net;
9494         LIST_HEAD(dev_kill_list);
9495
9496         /* To prevent network device cleanup code from dereferencing
9497          * loopback devices or network devices that have been freed,
9498          * wait here for all pending unregistrations to complete
9499          * before unregistering the loopback device and allowing the
9500          * network namespace to be freed.
9501          *
9502          * The netdev todo list, containing all network device
9503          * unregistrations that happen in default_device_exit_batch,
9504          * will run in the rtnl_unlock() at the end of
9505          * default_device_exit_batch.
9506          */
9507         rtnl_lock_unregistering(net_list);
9508         list_for_each_entry(net, net_list, exit_list) {
9509                 for_each_netdev_reverse(net, dev) {
9510                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
9511                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9512                         else
9513                                 unregister_netdevice_queue(dev, &dev_kill_list);
9514                 }
9515         }
9516         unregister_netdevice_many(&dev_kill_list);
9517         rtnl_unlock();
9518 }
9519
9520 static struct pernet_operations __net_initdata default_device_ops = {
9521         .exit = default_device_exit,
9522         .exit_batch = default_device_exit_batch,
9523 };
9524
9525 /*
9526  *      Initialize the DEV module. At boot time this walks the device list and
9527  *      unhooks any devices that fail to initialise (normally hardware not
9528  *      present) and leaves us with a valid list of present and active devices.
9529  *
9530  */
9531
9532 /*
9533  *       This is called single-threaded during boot, so no need
9534  *       to take the rtnl semaphore.
9535  */
9536 static int __init net_dev_init(void)
9537 {
9538         int i, rc = -ENOMEM;
9539
9540         BUG_ON(!dev_boot_phase);
9541
9542         if (dev_proc_init())
9543                 goto out;
9544
9545         if (netdev_kobject_init())
9546                 goto out;
9547
9548         INIT_LIST_HEAD(&ptype_all);
9549         for (i = 0; i < PTYPE_HASH_SIZE; i++)
9550                 INIT_LIST_HEAD(&ptype_base[i]);
9551
9552         INIT_LIST_HEAD(&offload_base);
9553
9554         if (register_pernet_subsys(&netdev_net_ops))
9555                 goto out;
9556
9557         /*
9558          *      Initialise the packet receive queues.
9559          */
9560
9561         for_each_possible_cpu(i) {
9562                 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
9563                 struct softnet_data *sd = &per_cpu(softnet_data, i);
9564
9565                 INIT_WORK(flush, flush_backlog);
9566
9567                 skb_queue_head_init(&sd->input_pkt_queue);
9568                 skb_queue_head_init(&sd->process_queue);
9569 #ifdef CONFIG_XFRM_OFFLOAD
9570                 skb_queue_head_init(&sd->xfrm_backlog);
9571 #endif
9572                 INIT_LIST_HEAD(&sd->poll_list);
9573                 sd->output_queue_tailp = &sd->output_queue;
9574 #ifdef CONFIG_RPS
9575                 sd->csd.func = rps_trigger_softirq;
9576                 sd->csd.info = sd;
9577                 sd->cpu = i;
9578 #endif
9579
9580                 init_gro_hash(&sd->backlog);
9581                 sd->backlog.poll = process_backlog;
9582                 sd->backlog.weight = weight_p;
9583         }
9584
9585         dev_boot_phase = 0;
9586
9587         /* The loopback device is special: if any other network device
9588          * is present in a network namespace, the loopback device must
9589          * be present too. Since we now dynamically allocate and free the
9590          * loopback device, ensure this invariant is maintained by
9591          * keeping the loopback device as the first device on the
9592          * list of network devices.  This ensures the loopback device
9593          * is the first device that appears and the last network device
9594          * that disappears.
9595          */
9596         if (register_pernet_device(&loopback_net_ops))
9597                 goto out;
9598
9599         if (register_pernet_device(&default_device_ops))
9600                 goto out;
9601
9602         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
9603         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
9604
9605         rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
9606                                        NULL, dev_cpu_dead);
9607         WARN_ON(rc < 0);
9608         rc = 0;
9609 out:
9610         return rc;
9611 }
9612
9613 subsys_initcall(net_dev_init);