4f8b92d81d107fc9acd2499297435cbd9e9b5c67
[linux-2.6-microblaze.git] / net / core / dev.c
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <linux/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/sched/mm.h>
85 #include <linux/mutex.h>
86 #include <linux/string.h>
87 #include <linux/mm.h>
88 #include <linux/socket.h>
89 #include <linux/sockios.h>
90 #include <linux/errno.h>
91 #include <linux/interrupt.h>
92 #include <linux/if_ether.h>
93 #include <linux/netdevice.h>
94 #include <linux/etherdevice.h>
95 #include <linux/ethtool.h>
96 #include <linux/notifier.h>
97 #include <linux/skbuff.h>
98 #include <linux/bpf.h>
99 #include <linux/bpf_trace.h>
100 #include <net/net_namespace.h>
101 #include <net/sock.h>
102 #include <net/busy_poll.h>
103 #include <linux/rtnetlink.h>
104 #include <linux/stat.h>
105 #include <net/dst.h>
106 #include <net/dst_metadata.h>
107 #include <net/pkt_sched.h>
108 #include <net/pkt_cls.h>
109 #include <net/checksum.h>
110 #include <net/xfrm.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
126 #include <net/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <linux/pci.h>
136 #include <linux/inetdevice.h>
137 #include <linux/cpu_rmap.h>
138 #include <linux/static_key.h>
139 #include <linux/hashtable.h>
140 #include <linux/vmalloc.h>
141 #include <linux/if_macvlan.h>
142 #include <linux/errqueue.h>
143 #include <linux/hrtimer.h>
144 #include <linux/netfilter_ingress.h>
145 #include <linux/crash_dump.h>
146 #include <linux/sctp.h>
147 #include <net/udp_tunnel.h>
148 #include <linux/net_namespace.h>
149
150 #include "net-sysfs.h"
151
152 #define MAX_GRO_SKBS 8
153
154 /* This should be increased if a protocol with a bigger head is added. */
155 #define GRO_MAX_HEAD (MAX_HEADER + 128)
156
157 static DEFINE_SPINLOCK(ptype_lock);
158 static DEFINE_SPINLOCK(offload_lock);
159 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
160 struct list_head ptype_all __read_mostly;       /* Taps */
161 static struct list_head offload_base __read_mostly;
162
163 static int netif_rx_internal(struct sk_buff *skb);
164 static int call_netdevice_notifiers_info(unsigned long val,
165                                          struct netdev_notifier_info *info);
166 static struct napi_struct *napi_by_id(unsigned int napi_id);
167
168 /*
169  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
170  * semaphore.
171  *
172  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
173  *
174  * Writers must hold the rtnl semaphore while they loop through the
175  * dev_base_head list, and hold dev_base_lock for writing when they do the
176  * actual updates.  This allows pure readers to access the list even
177  * while a writer is preparing to update it.
178  *
179  * To put it another way, dev_base_lock is held for writing only to
180  * protect against pure readers; the rtnl semaphore provides the
181  * protection against other writers.
182  *
183  * See, for example usages, register_netdevice() and
184  * unregister_netdevice(), which must be called with the rtnl
185  * semaphore held.
186  */
187 DEFINE_RWLOCK(dev_base_lock);
188 EXPORT_SYMBOL(dev_base_lock);
189
190 static DEFINE_MUTEX(ifalias_mutex);
191
192 /* protects napi_hash addition/deletion and napi_gen_id */
193 static DEFINE_SPINLOCK(napi_hash_lock);
194
195 static unsigned int napi_gen_id = NR_CPUS;
196 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
197
198 static seqcount_t devnet_rename_seq;
199
200 static inline void dev_base_seq_inc(struct net *net)
201 {
202         while (++net->dev_base_seq == 0)
203                 ;
204 }
205
206 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
207 {
208         unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
209
210         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
211 }
212
213 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
214 {
215         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
216 }
217
218 static inline void rps_lock(struct softnet_data *sd)
219 {
220 #ifdef CONFIG_RPS
221         spin_lock(&sd->input_pkt_queue.lock);
222 #endif
223 }
224
225 static inline void rps_unlock(struct softnet_data *sd)
226 {
227 #ifdef CONFIG_RPS
228         spin_unlock(&sd->input_pkt_queue.lock);
229 #endif
230 }
231
232 /* Device list insertion */
233 static void list_netdevice(struct net_device *dev)
234 {
235         struct net *net = dev_net(dev);
236
237         ASSERT_RTNL();
238
239         write_lock_bh(&dev_base_lock);
240         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
241         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
242         hlist_add_head_rcu(&dev->index_hlist,
243                            dev_index_hash(net, dev->ifindex));
244         write_unlock_bh(&dev_base_lock);
245
246         dev_base_seq_inc(net);
247 }
248
249 /* Device list removal
250  * caller must respect a RCU grace period before freeing/reusing dev
251  */
252 static void unlist_netdevice(struct net_device *dev)
253 {
254         ASSERT_RTNL();
255
256         /* Unlink dev from the device chain */
257         write_lock_bh(&dev_base_lock);
258         list_del_rcu(&dev->dev_list);
259         hlist_del_rcu(&dev->name_hlist);
260         hlist_del_rcu(&dev->index_hlist);
261         write_unlock_bh(&dev_base_lock);
262
263         dev_base_seq_inc(dev_net(dev));
264 }
265
266 /*
267  *      Our notifier list
268  */
269
270 static RAW_NOTIFIER_HEAD(netdev_chain);
271
272 /*
273  *      Device drivers call our routines to queue packets here. We empty the
274  *      queue in the local softnet handler.
275  */
276
277 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
278 EXPORT_PER_CPU_SYMBOL(softnet_data);
279
280 #ifdef CONFIG_LOCKDEP
281 /*
282  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
283  * according to dev->type
284  */
285 static const unsigned short netdev_lock_type[] = {
286          ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
287          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
288          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
289          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
290          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
291          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
292          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
293          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
294          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
295          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
296          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
297          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
298          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
299          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
300          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
301
302 static const char *const netdev_lock_name[] = {
303         "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
304         "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
305         "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
306         "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
307         "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
308         "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
309         "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
310         "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
311         "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
312         "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
313         "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
314         "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
315         "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
316         "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
317         "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
318
319 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
320 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
321
322 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
323 {
324         int i;
325
326         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
327                 if (netdev_lock_type[i] == dev_type)
328                         return i;
329         /* the last key is used by default */
330         return ARRAY_SIZE(netdev_lock_type) - 1;
331 }
332
333 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
334                                                  unsigned short dev_type)
335 {
336         int i;
337
338         i = netdev_lock_pos(dev_type);
339         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
340                                    netdev_lock_name[i]);
341 }
342
343 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
344 {
345         int i;
346
347         i = netdev_lock_pos(dev->type);
348         lockdep_set_class_and_name(&dev->addr_list_lock,
349                                    &netdev_addr_lock_key[i],
350                                    netdev_lock_name[i]);
351 }
352 #else
353 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
354                                                  unsigned short dev_type)
355 {
356 }
357 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
358 {
359 }
360 #endif
361
362 /*******************************************************************************
363  *
364  *              Protocol management and registration routines
365  *
366  *******************************************************************************/
367
368
369 /*
370  *      Add a protocol ID to the list. Now that the input handler is
371  *      smarter we can dispense with all the messy stuff that used to be
372  *      here.
373  *
374  *      BEWARE!!! Protocol handlers, mangling input packets,
375  *      MUST BE last in hash buckets and checking protocol handlers
376  *      MUST start from promiscuous ptype_all chain in net_bh.
377  *      It is true now, do not change it.
378  *      Explanation follows: if protocol handler, mangling packet, will
379  *      be the first on list, it is not able to sense, that packet
380  *      is cloned and should be copied-on-write, so that it will
381  *      change it and subsequent readers will get broken packet.
382  *                                                      --ANK (980803)
383  */
384
385 static inline struct list_head *ptype_head(const struct packet_type *pt)
386 {
387         if (pt->type == htons(ETH_P_ALL))
388                 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
389         else
390                 return pt->dev ? &pt->dev->ptype_specific :
391                                  &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
392 }
393
394 /**
395  *      dev_add_pack - add packet handler
396  *      @pt: packet type declaration
397  *
398  *      Add a protocol handler to the networking stack. The passed &packet_type
399  *      is linked into kernel lists and may not be freed until it has been
400  *      removed from the kernel lists.
401  *
402  *      This call does not sleep therefore it can not
403  *      guarantee all CPU's that are in middle of receiving packets
404  *      will see the new packet type (until the next received packet).
405  */
406
407 void dev_add_pack(struct packet_type *pt)
408 {
409         struct list_head *head = ptype_head(pt);
410
411         spin_lock(&ptype_lock);
412         list_add_rcu(&pt->list, head);
413         spin_unlock(&ptype_lock);
414 }
415 EXPORT_SYMBOL(dev_add_pack);
416
417 /**
418  *      __dev_remove_pack        - remove packet handler
419  *      @pt: packet type declaration
420  *
421  *      Remove a protocol handler that was previously added to the kernel
422  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
423  *      from the kernel lists and can be freed or reused once this function
424  *      returns.
425  *
426  *      The packet type might still be in use by receivers
427  *      and must not be freed until after all the CPU's have gone
428  *      through a quiescent state.
429  */
430 void __dev_remove_pack(struct packet_type *pt)
431 {
432         struct list_head *head = ptype_head(pt);
433         struct packet_type *pt1;
434
435         spin_lock(&ptype_lock);
436
437         list_for_each_entry(pt1, head, list) {
438                 if (pt == pt1) {
439                         list_del_rcu(&pt->list);
440                         goto out;
441                 }
442         }
443
444         pr_warn("dev_remove_pack: %p not found\n", pt);
445 out:
446         spin_unlock(&ptype_lock);
447 }
448 EXPORT_SYMBOL(__dev_remove_pack);
449
450 /**
451  *      dev_remove_pack  - remove packet handler
452  *      @pt: packet type declaration
453  *
454  *      Remove a protocol handler that was previously added to the kernel
455  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
456  *      from the kernel lists and can be freed or reused once this function
457  *      returns.
458  *
459  *      This call sleeps to guarantee that no CPU is looking at the packet
460  *      type after return.
461  */
462 void dev_remove_pack(struct packet_type *pt)
463 {
464         __dev_remove_pack(pt);
465
466         synchronize_net();
467 }
468 EXPORT_SYMBOL(dev_remove_pack);
469
470
471 /**
472  *      dev_add_offload - register offload handlers
473  *      @po: protocol offload declaration
474  *
475  *      Add protocol offload handlers to the networking stack. The passed
476  *      &proto_offload is linked into kernel lists and may not be freed until
477  *      it has been removed from the kernel lists.
478  *
479  *      This call does not sleep therefore it can not
480  *      guarantee all CPU's that are in middle of receiving packets
481  *      will see the new offload handlers (until the next received packet).
482  */
483 void dev_add_offload(struct packet_offload *po)
484 {
485         struct packet_offload *elem;
486
487         spin_lock(&offload_lock);
488         list_for_each_entry(elem, &offload_base, list) {
489                 if (po->priority < elem->priority)
490                         break;
491         }
492         list_add_rcu(&po->list, elem->list.prev);
493         spin_unlock(&offload_lock);
494 }
495 EXPORT_SYMBOL(dev_add_offload);
496
497 /**
498  *      __dev_remove_offload     - remove offload handler
499  *      @po: packet offload declaration
500  *
501  *      Remove a protocol offload handler that was previously added to the
502  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
503  *      is removed from the kernel lists and can be freed or reused once this
504  *      function returns.
505  *
506  *      The packet type might still be in use by receivers
507  *      and must not be freed until after all the CPU's have gone
508  *      through a quiescent state.
509  */
510 static void __dev_remove_offload(struct packet_offload *po)
511 {
512         struct list_head *head = &offload_base;
513         struct packet_offload *po1;
514
515         spin_lock(&offload_lock);
516
517         list_for_each_entry(po1, head, list) {
518                 if (po == po1) {
519                         list_del_rcu(&po->list);
520                         goto out;
521                 }
522         }
523
524         pr_warn("dev_remove_offload: %p not found\n", po);
525 out:
526         spin_unlock(&offload_lock);
527 }
528
529 /**
530  *      dev_remove_offload       - remove packet offload handler
531  *      @po: packet offload declaration
532  *
533  *      Remove a packet offload handler that was previously added to the kernel
534  *      offload handlers by dev_add_offload(). The passed &offload_type is
535  *      removed from the kernel lists and can be freed or reused once this
536  *      function returns.
537  *
538  *      This call sleeps to guarantee that no CPU is looking at the packet
539  *      type after return.
540  */
541 void dev_remove_offload(struct packet_offload *po)
542 {
543         __dev_remove_offload(po);
544
545         synchronize_net();
546 }
547 EXPORT_SYMBOL(dev_remove_offload);
548
549 /******************************************************************************
550  *
551  *                    Device Boot-time Settings Routines
552  *
553  ******************************************************************************/
554
555 /* Boot time configuration table */
556 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
557
558 /**
559  *      netdev_boot_setup_add   - add new setup entry
560  *      @name: name of the device
561  *      @map: configured settings for the device
562  *
563  *      Adds new setup entry to the dev_boot_setup list.  The function
564  *      returns 0 on error and 1 on success.  This is a generic routine to
565  *      all netdevices.
566  */
567 static int netdev_boot_setup_add(char *name, struct ifmap *map)
568 {
569         struct netdev_boot_setup *s;
570         int i;
571
572         s = dev_boot_setup;
573         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
574                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
575                         memset(s[i].name, 0, sizeof(s[i].name));
576                         strlcpy(s[i].name, name, IFNAMSIZ);
577                         memcpy(&s[i].map, map, sizeof(s[i].map));
578                         break;
579                 }
580         }
581
582         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
583 }
584
585 /**
586  * netdev_boot_setup_check      - check boot time settings
587  * @dev: the netdevice
588  *
589  * Check boot time settings for the device.
590  * The found settings are set for the device to be used
591  * later in the device probing.
592  * Returns 0 if no settings found, 1 if they are.
593  */
594 int netdev_boot_setup_check(struct net_device *dev)
595 {
596         struct netdev_boot_setup *s = dev_boot_setup;
597         int i;
598
599         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
600                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
601                     !strcmp(dev->name, s[i].name)) {
602                         dev->irq = s[i].map.irq;
603                         dev->base_addr = s[i].map.base_addr;
604                         dev->mem_start = s[i].map.mem_start;
605                         dev->mem_end = s[i].map.mem_end;
606                         return 1;
607                 }
608         }
609         return 0;
610 }
611 EXPORT_SYMBOL(netdev_boot_setup_check);
612
613
614 /**
615  * netdev_boot_base     - get address from boot time settings
616  * @prefix: prefix for network device
617  * @unit: id for network device
618  *
619  * Check boot time settings for the base address of device.
620  * The found settings are set for the device to be used
621  * later in the device probing.
622  * Returns 0 if no settings found.
623  */
624 unsigned long netdev_boot_base(const char *prefix, int unit)
625 {
626         const struct netdev_boot_setup *s = dev_boot_setup;
627         char name[IFNAMSIZ];
628         int i;
629
630         sprintf(name, "%s%d", prefix, unit);
631
632         /*
633          * If device already registered then return base of 1
634          * to indicate not to probe for this interface
635          */
636         if (__dev_get_by_name(&init_net, name))
637                 return 1;
638
639         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
640                 if (!strcmp(name, s[i].name))
641                         return s[i].map.base_addr;
642         return 0;
643 }
644
645 /*
646  * Saves at boot time configured settings for any netdevice.
647  */
648 int __init netdev_boot_setup(char *str)
649 {
650         int ints[5];
651         struct ifmap map;
652
653         str = get_options(str, ARRAY_SIZE(ints), ints);
654         if (!str || !*str)
655                 return 0;
656
657         /* Save settings */
658         memset(&map, 0, sizeof(map));
659         if (ints[0] > 0)
660                 map.irq = ints[1];
661         if (ints[0] > 1)
662                 map.base_addr = ints[2];
663         if (ints[0] > 2)
664                 map.mem_start = ints[3];
665         if (ints[0] > 3)
666                 map.mem_end = ints[4];
667
668         /* Add new entry to the list */
669         return netdev_boot_setup_add(str, &map);
670 }
671
672 __setup("netdev=", netdev_boot_setup);
673
674 /*******************************************************************************
675  *
676  *                          Device Interface Subroutines
677  *
678  *******************************************************************************/
679
680 /**
681  *      dev_get_iflink  - get 'iflink' value of a interface
682  *      @dev: targeted interface
683  *
684  *      Indicates the ifindex the interface is linked to.
685  *      Physical interfaces have the same 'ifindex' and 'iflink' values.
686  */
687
688 int dev_get_iflink(const struct net_device *dev)
689 {
690         if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
691                 return dev->netdev_ops->ndo_get_iflink(dev);
692
693         return dev->ifindex;
694 }
695 EXPORT_SYMBOL(dev_get_iflink);
696
697 /**
698  *      dev_fill_metadata_dst - Retrieve tunnel egress information.
699  *      @dev: targeted interface
700  *      @skb: The packet.
701  *
702  *      For better visibility of tunnel traffic OVS needs to retrieve
703  *      egress tunnel information for a packet. Following API allows
704  *      user to get this info.
705  */
706 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
707 {
708         struct ip_tunnel_info *info;
709
710         if (!dev->netdev_ops  || !dev->netdev_ops->ndo_fill_metadata_dst)
711                 return -EINVAL;
712
713         info = skb_tunnel_info_unclone(skb);
714         if (!info)
715                 return -ENOMEM;
716         if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
717                 return -EINVAL;
718
719         return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
720 }
721 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
722
723 /**
724  *      __dev_get_by_name       - find a device by its name
725  *      @net: the applicable net namespace
726  *      @name: name to find
727  *
728  *      Find an interface by name. Must be called under RTNL semaphore
729  *      or @dev_base_lock. If the name is found a pointer to the device
730  *      is returned. If the name is not found then %NULL is returned. The
731  *      reference counters are not incremented so the caller must be
732  *      careful with locks.
733  */
734
735 struct net_device *__dev_get_by_name(struct net *net, const char *name)
736 {
737         struct net_device *dev;
738         struct hlist_head *head = dev_name_hash(net, name);
739
740         hlist_for_each_entry(dev, head, name_hlist)
741                 if (!strncmp(dev->name, name, IFNAMSIZ))
742                         return dev;
743
744         return NULL;
745 }
746 EXPORT_SYMBOL(__dev_get_by_name);
747
748 /**
749  * dev_get_by_name_rcu  - find a device by its name
750  * @net: the applicable net namespace
751  * @name: name to find
752  *
753  * Find an interface by name.
754  * If the name is found a pointer to the device is returned.
755  * If the name is not found then %NULL is returned.
756  * The reference counters are not incremented so the caller must be
757  * careful with locks. The caller must hold RCU lock.
758  */
759
760 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
761 {
762         struct net_device *dev;
763         struct hlist_head *head = dev_name_hash(net, name);
764
765         hlist_for_each_entry_rcu(dev, head, name_hlist)
766                 if (!strncmp(dev->name, name, IFNAMSIZ))
767                         return dev;
768
769         return NULL;
770 }
771 EXPORT_SYMBOL(dev_get_by_name_rcu);
772
773 /**
774  *      dev_get_by_name         - find a device by its name
775  *      @net: the applicable net namespace
776  *      @name: name to find
777  *
778  *      Find an interface by name. This can be called from any
779  *      context and does its own locking. The returned handle has
780  *      the usage count incremented and the caller must use dev_put() to
781  *      release it when it is no longer needed. %NULL is returned if no
782  *      matching device is found.
783  */
784
785 struct net_device *dev_get_by_name(struct net *net, const char *name)
786 {
787         struct net_device *dev;
788
789         rcu_read_lock();
790         dev = dev_get_by_name_rcu(net, name);
791         if (dev)
792                 dev_hold(dev);
793         rcu_read_unlock();
794         return dev;
795 }
796 EXPORT_SYMBOL(dev_get_by_name);
797
798 /**
799  *      __dev_get_by_index - find a device by its ifindex
800  *      @net: the applicable net namespace
801  *      @ifindex: index of device
802  *
803  *      Search for an interface by index. Returns %NULL if the device
804  *      is not found or a pointer to the device. The device has not
805  *      had its reference counter increased so the caller must be careful
806  *      about locking. The caller must hold either the RTNL semaphore
807  *      or @dev_base_lock.
808  */
809
810 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
811 {
812         struct net_device *dev;
813         struct hlist_head *head = dev_index_hash(net, ifindex);
814
815         hlist_for_each_entry(dev, head, index_hlist)
816                 if (dev->ifindex == ifindex)
817                         return dev;
818
819         return NULL;
820 }
821 EXPORT_SYMBOL(__dev_get_by_index);
822
823 /**
824  *      dev_get_by_index_rcu - find a device by its ifindex
825  *      @net: the applicable net namespace
826  *      @ifindex: index of device
827  *
828  *      Search for an interface by index. Returns %NULL if the device
829  *      is not found or a pointer to the device. The device has not
830  *      had its reference counter increased so the caller must be careful
831  *      about locking. The caller must hold RCU lock.
832  */
833
834 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
835 {
836         struct net_device *dev;
837         struct hlist_head *head = dev_index_hash(net, ifindex);
838
839         hlist_for_each_entry_rcu(dev, head, index_hlist)
840                 if (dev->ifindex == ifindex)
841                         return dev;
842
843         return NULL;
844 }
845 EXPORT_SYMBOL(dev_get_by_index_rcu);
846
847
848 /**
849  *      dev_get_by_index - find a device by its ifindex
850  *      @net: the applicable net namespace
851  *      @ifindex: index of device
852  *
853  *      Search for an interface by index. Returns NULL if the device
854  *      is not found or a pointer to the device. The device returned has
855  *      had a reference added and the pointer is safe until the user calls
856  *      dev_put to indicate they have finished with it.
857  */
858
859 struct net_device *dev_get_by_index(struct net *net, int ifindex)
860 {
861         struct net_device *dev;
862
863         rcu_read_lock();
864         dev = dev_get_by_index_rcu(net, ifindex);
865         if (dev)
866                 dev_hold(dev);
867         rcu_read_unlock();
868         return dev;
869 }
870 EXPORT_SYMBOL(dev_get_by_index);
871
872 /**
873  *      dev_get_by_napi_id - find a device by napi_id
874  *      @napi_id: ID of the NAPI struct
875  *
876  *      Search for an interface by NAPI ID. Returns %NULL if the device
877  *      is not found or a pointer to the device. The device has not had
878  *      its reference counter increased so the caller must be careful
879  *      about locking. The caller must hold RCU lock.
880  */
881
882 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
883 {
884         struct napi_struct *napi;
885
886         WARN_ON_ONCE(!rcu_read_lock_held());
887
888         if (napi_id < MIN_NAPI_ID)
889                 return NULL;
890
891         napi = napi_by_id(napi_id);
892
893         return napi ? napi->dev : NULL;
894 }
895 EXPORT_SYMBOL(dev_get_by_napi_id);
896
897 /**
898  *      netdev_get_name - get a netdevice name, knowing its ifindex.
899  *      @net: network namespace
900  *      @name: a pointer to the buffer where the name will be stored.
901  *      @ifindex: the ifindex of the interface to get the name from.
902  *
903  *      The use of raw_seqcount_begin() and cond_resched() before
904  *      retrying is required as we want to give the writers a chance
905  *      to complete when CONFIG_PREEMPT is not set.
906  */
907 int netdev_get_name(struct net *net, char *name, int ifindex)
908 {
909         struct net_device *dev;
910         unsigned int seq;
911
912 retry:
913         seq = raw_seqcount_begin(&devnet_rename_seq);
914         rcu_read_lock();
915         dev = dev_get_by_index_rcu(net, ifindex);
916         if (!dev) {
917                 rcu_read_unlock();
918                 return -ENODEV;
919         }
920
921         strcpy(name, dev->name);
922         rcu_read_unlock();
923         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
924                 cond_resched();
925                 goto retry;
926         }
927
928         return 0;
929 }
930
931 /**
932  *      dev_getbyhwaddr_rcu - find a device by its hardware address
933  *      @net: the applicable net namespace
934  *      @type: media type of device
935  *      @ha: hardware address
936  *
937  *      Search for an interface by MAC address. Returns NULL if the device
938  *      is not found or a pointer to the device.
939  *      The caller must hold RCU or RTNL.
940  *      The returned device has not had its ref count increased
941  *      and the caller must therefore be careful about locking
942  *
943  */
944
945 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
946                                        const char *ha)
947 {
948         struct net_device *dev;
949
950         for_each_netdev_rcu(net, dev)
951                 if (dev->type == type &&
952                     !memcmp(dev->dev_addr, ha, dev->addr_len))
953                         return dev;
954
955         return NULL;
956 }
957 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
958
959 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
960 {
961         struct net_device *dev;
962
963         ASSERT_RTNL();
964         for_each_netdev(net, dev)
965                 if (dev->type == type)
966                         return dev;
967
968         return NULL;
969 }
970 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
971
972 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
973 {
974         struct net_device *dev, *ret = NULL;
975
976         rcu_read_lock();
977         for_each_netdev_rcu(net, dev)
978                 if (dev->type == type) {
979                         dev_hold(dev);
980                         ret = dev;
981                         break;
982                 }
983         rcu_read_unlock();
984         return ret;
985 }
986 EXPORT_SYMBOL(dev_getfirstbyhwtype);
987
988 /**
989  *      __dev_get_by_flags - find any device with given flags
990  *      @net: the applicable net namespace
991  *      @if_flags: IFF_* values
992  *      @mask: bitmask of bits in if_flags to check
993  *
994  *      Search for any interface with the given flags. Returns NULL if a device
995  *      is not found or a pointer to the device. Must be called inside
996  *      rtnl_lock(), and result refcount is unchanged.
997  */
998
999 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1000                                       unsigned short mask)
1001 {
1002         struct net_device *dev, *ret;
1003
1004         ASSERT_RTNL();
1005
1006         ret = NULL;
1007         for_each_netdev(net, dev) {
1008                 if (((dev->flags ^ if_flags) & mask) == 0) {
1009                         ret = dev;
1010                         break;
1011                 }
1012         }
1013         return ret;
1014 }
1015 EXPORT_SYMBOL(__dev_get_by_flags);
1016
1017 /**
1018  *      dev_valid_name - check if name is okay for network device
1019  *      @name: name string
1020  *
1021  *      Network device names need to be valid file names to
1022  *      to allow sysfs to work.  We also disallow any kind of
1023  *      whitespace.
1024  */
1025 bool dev_valid_name(const char *name)
1026 {
1027         if (*name == '\0')
1028                 return false;
1029         if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1030                 return false;
1031         if (!strcmp(name, ".") || !strcmp(name, ".."))
1032                 return false;
1033
1034         while (*name) {
1035                 if (*name == '/' || *name == ':' || isspace(*name))
1036                         return false;
1037                 name++;
1038         }
1039         return true;
1040 }
1041 EXPORT_SYMBOL(dev_valid_name);
1042
1043 /**
1044  *      __dev_alloc_name - allocate a name for a device
1045  *      @net: network namespace to allocate the device name in
1046  *      @name: name format string
1047  *      @buf:  scratch buffer and result name string
1048  *
1049  *      Passed a format string - eg "lt%d" it will try and find a suitable
1050  *      id. It scans list of devices to build up a free map, then chooses
1051  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1052  *      while allocating the name and adding the device in order to avoid
1053  *      duplicates.
1054  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055  *      Returns the number of the unit assigned or a negative errno code.
1056  */
1057
1058 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1059 {
1060         int i = 0;
1061         const char *p;
1062         const int max_netdevices = 8*PAGE_SIZE;
1063         unsigned long *inuse;
1064         struct net_device *d;
1065
1066         if (!dev_valid_name(name))
1067                 return -EINVAL;
1068
1069         p = strchr(name, '%');
1070         if (p) {
1071                 /*
1072                  * Verify the string as this thing may have come from
1073                  * the user.  There must be either one "%d" and no other "%"
1074                  * characters.
1075                  */
1076                 if (p[1] != 'd' || strchr(p + 2, '%'))
1077                         return -EINVAL;
1078
1079                 /* Use one page as a bit array of possible slots */
1080                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1081                 if (!inuse)
1082                         return -ENOMEM;
1083
1084                 for_each_netdev(net, d) {
1085                         if (!sscanf(d->name, name, &i))
1086                                 continue;
1087                         if (i < 0 || i >= max_netdevices)
1088                                 continue;
1089
1090                         /*  avoid cases where sscanf is not exact inverse of printf */
1091                         snprintf(buf, IFNAMSIZ, name, i);
1092                         if (!strncmp(buf, d->name, IFNAMSIZ))
1093                                 set_bit(i, inuse);
1094                 }
1095
1096                 i = find_first_zero_bit(inuse, max_netdevices);
1097                 free_page((unsigned long) inuse);
1098         }
1099
1100         snprintf(buf, IFNAMSIZ, name, i);
1101         if (!__dev_get_by_name(net, buf))
1102                 return i;
1103
1104         /* It is possible to run out of possible slots
1105          * when the name is long and there isn't enough space left
1106          * for the digits, or if all bits are used.
1107          */
1108         return -ENFILE;
1109 }
1110
1111 static int dev_alloc_name_ns(struct net *net,
1112                              struct net_device *dev,
1113                              const char *name)
1114 {
1115         char buf[IFNAMSIZ];
1116         int ret;
1117
1118         BUG_ON(!net);
1119         ret = __dev_alloc_name(net, name, buf);
1120         if (ret >= 0)
1121                 strlcpy(dev->name, buf, IFNAMSIZ);
1122         return ret;
1123 }
1124
1125 /**
1126  *      dev_alloc_name - allocate a name for a device
1127  *      @dev: device
1128  *      @name: name format string
1129  *
1130  *      Passed a format string - eg "lt%d" it will try and find a suitable
1131  *      id. It scans list of devices to build up a free map, then chooses
1132  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1133  *      while allocating the name and adding the device in order to avoid
1134  *      duplicates.
1135  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1136  *      Returns the number of the unit assigned or a negative errno code.
1137  */
1138
1139 int dev_alloc_name(struct net_device *dev, const char *name)
1140 {
1141         return dev_alloc_name_ns(dev_net(dev), dev, name);
1142 }
1143 EXPORT_SYMBOL(dev_alloc_name);
1144
1145 int dev_get_valid_name(struct net *net, struct net_device *dev,
1146                        const char *name)
1147 {
1148         BUG_ON(!net);
1149
1150         if (!dev_valid_name(name))
1151                 return -EINVAL;
1152
1153         if (strchr(name, '%'))
1154                 return dev_alloc_name_ns(net, dev, name);
1155         else if (__dev_get_by_name(net, name))
1156                 return -EEXIST;
1157         else if (dev->name != name)
1158                 strlcpy(dev->name, name, IFNAMSIZ);
1159
1160         return 0;
1161 }
1162 EXPORT_SYMBOL(dev_get_valid_name);
1163
1164 /**
1165  *      dev_change_name - change name of a device
1166  *      @dev: device
1167  *      @newname: name (or format string) must be at least IFNAMSIZ
1168  *
1169  *      Change name of a device, can pass format strings "eth%d".
1170  *      for wildcarding.
1171  */
1172 int dev_change_name(struct net_device *dev, const char *newname)
1173 {
1174         unsigned char old_assign_type;
1175         char oldname[IFNAMSIZ];
1176         int err = 0;
1177         int ret;
1178         struct net *net;
1179
1180         ASSERT_RTNL();
1181         BUG_ON(!dev_net(dev));
1182
1183         net = dev_net(dev);
1184         if (dev->flags & IFF_UP)
1185                 return -EBUSY;
1186
1187         write_seqcount_begin(&devnet_rename_seq);
1188
1189         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1190                 write_seqcount_end(&devnet_rename_seq);
1191                 return 0;
1192         }
1193
1194         memcpy(oldname, dev->name, IFNAMSIZ);
1195
1196         err = dev_get_valid_name(net, dev, newname);
1197         if (err < 0) {
1198                 write_seqcount_end(&devnet_rename_seq);
1199                 return err;
1200         }
1201
1202         if (oldname[0] && !strchr(oldname, '%'))
1203                 netdev_info(dev, "renamed from %s\n", oldname);
1204
1205         old_assign_type = dev->name_assign_type;
1206         dev->name_assign_type = NET_NAME_RENAMED;
1207
1208 rollback:
1209         ret = device_rename(&dev->dev, dev->name);
1210         if (ret) {
1211                 memcpy(dev->name, oldname, IFNAMSIZ);
1212                 dev->name_assign_type = old_assign_type;
1213                 write_seqcount_end(&devnet_rename_seq);
1214                 return ret;
1215         }
1216
1217         write_seqcount_end(&devnet_rename_seq);
1218
1219         netdev_adjacent_rename_links(dev, oldname);
1220
1221         write_lock_bh(&dev_base_lock);
1222         hlist_del_rcu(&dev->name_hlist);
1223         write_unlock_bh(&dev_base_lock);
1224
1225         synchronize_rcu();
1226
1227         write_lock_bh(&dev_base_lock);
1228         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1229         write_unlock_bh(&dev_base_lock);
1230
1231         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1232         ret = notifier_to_errno(ret);
1233
1234         if (ret) {
1235                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1236                 if (err >= 0) {
1237                         err = ret;
1238                         write_seqcount_begin(&devnet_rename_seq);
1239                         memcpy(dev->name, oldname, IFNAMSIZ);
1240                         memcpy(oldname, newname, IFNAMSIZ);
1241                         dev->name_assign_type = old_assign_type;
1242                         old_assign_type = NET_NAME_RENAMED;
1243                         goto rollback;
1244                 } else {
1245                         pr_err("%s: name change rollback failed: %d\n",
1246                                dev->name, ret);
1247                 }
1248         }
1249
1250         return err;
1251 }
1252
1253 /**
1254  *      dev_set_alias - change ifalias of a device
1255  *      @dev: device
1256  *      @alias: name up to IFALIASZ
1257  *      @len: limit of bytes to copy from info
1258  *
1259  *      Set ifalias for a device,
1260  */
1261 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1262 {
1263         struct dev_ifalias *new_alias = NULL;
1264
1265         if (len >= IFALIASZ)
1266                 return -EINVAL;
1267
1268         if (len) {
1269                 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1270                 if (!new_alias)
1271                         return -ENOMEM;
1272
1273                 memcpy(new_alias->ifalias, alias, len);
1274                 new_alias->ifalias[len] = 0;
1275         }
1276
1277         mutex_lock(&ifalias_mutex);
1278         rcu_swap_protected(dev->ifalias, new_alias,
1279                            mutex_is_locked(&ifalias_mutex));
1280         mutex_unlock(&ifalias_mutex);
1281
1282         if (new_alias)
1283                 kfree_rcu(new_alias, rcuhead);
1284
1285         return len;
1286 }
1287 EXPORT_SYMBOL(dev_set_alias);
1288
1289 /**
1290  *      dev_get_alias - get ifalias of a device
1291  *      @dev: device
1292  *      @name: buffer to store name of ifalias
1293  *      @len: size of buffer
1294  *
1295  *      get ifalias for a device.  Caller must make sure dev cannot go
1296  *      away,  e.g. rcu read lock or own a reference count to device.
1297  */
1298 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1299 {
1300         const struct dev_ifalias *alias;
1301         int ret = 0;
1302
1303         rcu_read_lock();
1304         alias = rcu_dereference(dev->ifalias);
1305         if (alias)
1306                 ret = snprintf(name, len, "%s", alias->ifalias);
1307         rcu_read_unlock();
1308
1309         return ret;
1310 }
1311
1312 /**
1313  *      netdev_features_change - device changes features
1314  *      @dev: device to cause notification
1315  *
1316  *      Called to indicate a device has changed features.
1317  */
1318 void netdev_features_change(struct net_device *dev)
1319 {
1320         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1321 }
1322 EXPORT_SYMBOL(netdev_features_change);
1323
1324 /**
1325  *      netdev_state_change - device changes state
1326  *      @dev: device to cause notification
1327  *
1328  *      Called to indicate a device has changed state. This function calls
1329  *      the notifier chains for netdev_chain and sends a NEWLINK message
1330  *      to the routing socket.
1331  */
1332 void netdev_state_change(struct net_device *dev)
1333 {
1334         if (dev->flags & IFF_UP) {
1335                 struct netdev_notifier_change_info change_info = {
1336                         .info.dev = dev,
1337                 };
1338
1339                 call_netdevice_notifiers_info(NETDEV_CHANGE,
1340                                               &change_info.info);
1341                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1342         }
1343 }
1344 EXPORT_SYMBOL(netdev_state_change);
1345
1346 /**
1347  * netdev_notify_peers - notify network peers about existence of @dev
1348  * @dev: network device
1349  *
1350  * Generate traffic such that interested network peers are aware of
1351  * @dev, such as by generating a gratuitous ARP. This may be used when
1352  * a device wants to inform the rest of the network about some sort of
1353  * reconfiguration such as a failover event or virtual machine
1354  * migration.
1355  */
1356 void netdev_notify_peers(struct net_device *dev)
1357 {
1358         rtnl_lock();
1359         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1360         call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1361         rtnl_unlock();
1362 }
1363 EXPORT_SYMBOL(netdev_notify_peers);
1364
1365 static int __dev_open(struct net_device *dev)
1366 {
1367         const struct net_device_ops *ops = dev->netdev_ops;
1368         int ret;
1369
1370         ASSERT_RTNL();
1371
1372         if (!netif_device_present(dev))
1373                 return -ENODEV;
1374
1375         /* Block netpoll from trying to do any rx path servicing.
1376          * If we don't do this there is a chance ndo_poll_controller
1377          * or ndo_poll may be running while we open the device
1378          */
1379         netpoll_poll_disable(dev);
1380
1381         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1382         ret = notifier_to_errno(ret);
1383         if (ret)
1384                 return ret;
1385
1386         set_bit(__LINK_STATE_START, &dev->state);
1387
1388         if (ops->ndo_validate_addr)
1389                 ret = ops->ndo_validate_addr(dev);
1390
1391         if (!ret && ops->ndo_open)
1392                 ret = ops->ndo_open(dev);
1393
1394         netpoll_poll_enable(dev);
1395
1396         if (ret)
1397                 clear_bit(__LINK_STATE_START, &dev->state);
1398         else {
1399                 dev->flags |= IFF_UP;
1400                 dev_set_rx_mode(dev);
1401                 dev_activate(dev);
1402                 add_device_randomness(dev->dev_addr, dev->addr_len);
1403         }
1404
1405         return ret;
1406 }
1407
1408 /**
1409  *      dev_open        - prepare an interface for use.
1410  *      @dev:   device to open
1411  *
1412  *      Takes a device from down to up state. The device's private open
1413  *      function is invoked and then the multicast lists are loaded. Finally
1414  *      the device is moved into the up state and a %NETDEV_UP message is
1415  *      sent to the netdev notifier chain.
1416  *
1417  *      Calling this function on an active interface is a nop. On a failure
1418  *      a negative errno code is returned.
1419  */
1420 int dev_open(struct net_device *dev)
1421 {
1422         int ret;
1423
1424         if (dev->flags & IFF_UP)
1425                 return 0;
1426
1427         ret = __dev_open(dev);
1428         if (ret < 0)
1429                 return ret;
1430
1431         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1432         call_netdevice_notifiers(NETDEV_UP, dev);
1433
1434         return ret;
1435 }
1436 EXPORT_SYMBOL(dev_open);
1437
1438 static void __dev_close_many(struct list_head *head)
1439 {
1440         struct net_device *dev;
1441
1442         ASSERT_RTNL();
1443         might_sleep();
1444
1445         list_for_each_entry(dev, head, close_list) {
1446                 /* Temporarily disable netpoll until the interface is down */
1447                 netpoll_poll_disable(dev);
1448
1449                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1450
1451                 clear_bit(__LINK_STATE_START, &dev->state);
1452
1453                 /* Synchronize to scheduled poll. We cannot touch poll list, it
1454                  * can be even on different cpu. So just clear netif_running().
1455                  *
1456                  * dev->stop() will invoke napi_disable() on all of it's
1457                  * napi_struct instances on this device.
1458                  */
1459                 smp_mb__after_atomic(); /* Commit netif_running(). */
1460         }
1461
1462         dev_deactivate_many(head);
1463
1464         list_for_each_entry(dev, head, close_list) {
1465                 const struct net_device_ops *ops = dev->netdev_ops;
1466
1467                 /*
1468                  *      Call the device specific close. This cannot fail.
1469                  *      Only if device is UP
1470                  *
1471                  *      We allow it to be called even after a DETACH hot-plug
1472                  *      event.
1473                  */
1474                 if (ops->ndo_stop)
1475                         ops->ndo_stop(dev);
1476
1477                 dev->flags &= ~IFF_UP;
1478                 netpoll_poll_enable(dev);
1479         }
1480 }
1481
1482 static void __dev_close(struct net_device *dev)
1483 {
1484         LIST_HEAD(single);
1485
1486         list_add(&dev->close_list, &single);
1487         __dev_close_many(&single);
1488         list_del(&single);
1489 }
1490
1491 void dev_close_many(struct list_head *head, bool unlink)
1492 {
1493         struct net_device *dev, *tmp;
1494
1495         /* Remove the devices that don't need to be closed */
1496         list_for_each_entry_safe(dev, tmp, head, close_list)
1497                 if (!(dev->flags & IFF_UP))
1498                         list_del_init(&dev->close_list);
1499
1500         __dev_close_many(head);
1501
1502         list_for_each_entry_safe(dev, tmp, head, close_list) {
1503                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1504                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1505                 if (unlink)
1506                         list_del_init(&dev->close_list);
1507         }
1508 }
1509 EXPORT_SYMBOL(dev_close_many);
1510
1511 /**
1512  *      dev_close - shutdown an interface.
1513  *      @dev: device to shutdown
1514  *
1515  *      This function moves an active device into down state. A
1516  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1517  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1518  *      chain.
1519  */
1520 void dev_close(struct net_device *dev)
1521 {
1522         if (dev->flags & IFF_UP) {
1523                 LIST_HEAD(single);
1524
1525                 list_add(&dev->close_list, &single);
1526                 dev_close_many(&single, true);
1527                 list_del(&single);
1528         }
1529 }
1530 EXPORT_SYMBOL(dev_close);
1531
1532
1533 /**
1534  *      dev_disable_lro - disable Large Receive Offload on a device
1535  *      @dev: device
1536  *
1537  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1538  *      called under RTNL.  This is needed if received packets may be
1539  *      forwarded to another interface.
1540  */
1541 void dev_disable_lro(struct net_device *dev)
1542 {
1543         struct net_device *lower_dev;
1544         struct list_head *iter;
1545
1546         dev->wanted_features &= ~NETIF_F_LRO;
1547         netdev_update_features(dev);
1548
1549         if (unlikely(dev->features & NETIF_F_LRO))
1550                 netdev_WARN(dev, "failed to disable LRO!\n");
1551
1552         netdev_for_each_lower_dev(dev, lower_dev, iter)
1553                 dev_disable_lro(lower_dev);
1554 }
1555 EXPORT_SYMBOL(dev_disable_lro);
1556
1557 /**
1558  *      dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1559  *      @dev: device
1560  *
1561  *      Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
1562  *      called under RTNL.  This is needed if Generic XDP is installed on
1563  *      the device.
1564  */
1565 static void dev_disable_gro_hw(struct net_device *dev)
1566 {
1567         dev->wanted_features &= ~NETIF_F_GRO_HW;
1568         netdev_update_features(dev);
1569
1570         if (unlikely(dev->features & NETIF_F_GRO_HW))
1571                 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1572 }
1573
1574 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1575 {
1576 #define N(val)                                          \
1577         case NETDEV_##val:                              \
1578                 return "NETDEV_" __stringify(val);
1579         switch (cmd) {
1580         N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1581         N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1582         N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1583         N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1584         N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1585         N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1586         N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1587         N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1588         N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1589         }
1590 #undef N
1591         return "UNKNOWN_NETDEV_EVENT";
1592 }
1593 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1594
1595 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1596                                    struct net_device *dev)
1597 {
1598         struct netdev_notifier_info info = {
1599                 .dev = dev,
1600         };
1601
1602         return nb->notifier_call(nb, val, &info);
1603 }
1604
1605 static int dev_boot_phase = 1;
1606
1607 /**
1608  * register_netdevice_notifier - register a network notifier block
1609  * @nb: notifier
1610  *
1611  * Register a notifier to be called when network device events occur.
1612  * The notifier passed is linked into the kernel structures and must
1613  * not be reused until it has been unregistered. A negative errno code
1614  * is returned on a failure.
1615  *
1616  * When registered all registration and up events are replayed
1617  * to the new notifier to allow device to have a race free
1618  * view of the network device list.
1619  */
1620
1621 int register_netdevice_notifier(struct notifier_block *nb)
1622 {
1623         struct net_device *dev;
1624         struct net_device *last;
1625         struct net *net;
1626         int err;
1627
1628         /* Close race with setup_net() and cleanup_net() */
1629         down_write(&pernet_ops_rwsem);
1630         rtnl_lock();
1631         err = raw_notifier_chain_register(&netdev_chain, nb);
1632         if (err)
1633                 goto unlock;
1634         if (dev_boot_phase)
1635                 goto unlock;
1636         for_each_net(net) {
1637                 for_each_netdev(net, dev) {
1638                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1639                         err = notifier_to_errno(err);
1640                         if (err)
1641                                 goto rollback;
1642
1643                         if (!(dev->flags & IFF_UP))
1644                                 continue;
1645
1646                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1647                 }
1648         }
1649
1650 unlock:
1651         rtnl_unlock();
1652         up_write(&pernet_ops_rwsem);
1653         return err;
1654
1655 rollback:
1656         last = dev;
1657         for_each_net(net) {
1658                 for_each_netdev(net, dev) {
1659                         if (dev == last)
1660                                 goto outroll;
1661
1662                         if (dev->flags & IFF_UP) {
1663                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1664                                                         dev);
1665                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1666                         }
1667                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1668                 }
1669         }
1670
1671 outroll:
1672         raw_notifier_chain_unregister(&netdev_chain, nb);
1673         goto unlock;
1674 }
1675 EXPORT_SYMBOL(register_netdevice_notifier);
1676
1677 /**
1678  * unregister_netdevice_notifier - unregister a network notifier block
1679  * @nb: notifier
1680  *
1681  * Unregister a notifier previously registered by
1682  * register_netdevice_notifier(). The notifier is unlinked into the
1683  * kernel structures and may then be reused. A negative errno code
1684  * is returned on a failure.
1685  *
1686  * After unregistering unregister and down device events are synthesized
1687  * for all devices on the device list to the removed notifier to remove
1688  * the need for special case cleanup code.
1689  */
1690
1691 int unregister_netdevice_notifier(struct notifier_block *nb)
1692 {
1693         struct net_device *dev;
1694         struct net *net;
1695         int err;
1696
1697         /* Close race with setup_net() and cleanup_net() */
1698         down_write(&pernet_ops_rwsem);
1699         rtnl_lock();
1700         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1701         if (err)
1702                 goto unlock;
1703
1704         for_each_net(net) {
1705                 for_each_netdev(net, dev) {
1706                         if (dev->flags & IFF_UP) {
1707                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1708                                                         dev);
1709                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1710                         }
1711                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1712                 }
1713         }
1714 unlock:
1715         rtnl_unlock();
1716         up_write(&pernet_ops_rwsem);
1717         return err;
1718 }
1719 EXPORT_SYMBOL(unregister_netdevice_notifier);
1720
1721 /**
1722  *      call_netdevice_notifiers_info - call all network notifier blocks
1723  *      @val: value passed unmodified to notifier function
1724  *      @info: notifier information data
1725  *
1726  *      Call all network notifier blocks.  Parameters and return value
1727  *      are as for raw_notifier_call_chain().
1728  */
1729
1730 static int call_netdevice_notifiers_info(unsigned long val,
1731                                          struct netdev_notifier_info *info)
1732 {
1733         ASSERT_RTNL();
1734         return raw_notifier_call_chain(&netdev_chain, val, info);
1735 }
1736
1737 /**
1738  *      call_netdevice_notifiers - call all network notifier blocks
1739  *      @val: value passed unmodified to notifier function
1740  *      @dev: net_device pointer passed unmodified to notifier function
1741  *
1742  *      Call all network notifier blocks.  Parameters and return value
1743  *      are as for raw_notifier_call_chain().
1744  */
1745
1746 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1747 {
1748         struct netdev_notifier_info info = {
1749                 .dev = dev,
1750         };
1751
1752         return call_netdevice_notifiers_info(val, &info);
1753 }
1754 EXPORT_SYMBOL(call_netdevice_notifiers);
1755
1756 #ifdef CONFIG_NET_INGRESS
1757 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
1758
1759 void net_inc_ingress_queue(void)
1760 {
1761         static_branch_inc(&ingress_needed_key);
1762 }
1763 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1764
1765 void net_dec_ingress_queue(void)
1766 {
1767         static_branch_dec(&ingress_needed_key);
1768 }
1769 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1770 #endif
1771
1772 #ifdef CONFIG_NET_EGRESS
1773 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1774
1775 void net_inc_egress_queue(void)
1776 {
1777         static_branch_inc(&egress_needed_key);
1778 }
1779 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1780
1781 void net_dec_egress_queue(void)
1782 {
1783         static_branch_dec(&egress_needed_key);
1784 }
1785 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1786 #endif
1787
1788 static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
1789 #ifdef HAVE_JUMP_LABEL
1790 static atomic_t netstamp_needed_deferred;
1791 static atomic_t netstamp_wanted;
1792 static void netstamp_clear(struct work_struct *work)
1793 {
1794         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1795         int wanted;
1796
1797         wanted = atomic_add_return(deferred, &netstamp_wanted);
1798         if (wanted > 0)
1799                 static_branch_enable(&netstamp_needed_key);
1800         else
1801                 static_branch_disable(&netstamp_needed_key);
1802 }
1803 static DECLARE_WORK(netstamp_work, netstamp_clear);
1804 #endif
1805
1806 void net_enable_timestamp(void)
1807 {
1808 #ifdef HAVE_JUMP_LABEL
1809         int wanted;
1810
1811         while (1) {
1812                 wanted = atomic_read(&netstamp_wanted);
1813                 if (wanted <= 0)
1814                         break;
1815                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
1816                         return;
1817         }
1818         atomic_inc(&netstamp_needed_deferred);
1819         schedule_work(&netstamp_work);
1820 #else
1821         static_branch_inc(&netstamp_needed_key);
1822 #endif
1823 }
1824 EXPORT_SYMBOL(net_enable_timestamp);
1825
1826 void net_disable_timestamp(void)
1827 {
1828 #ifdef HAVE_JUMP_LABEL
1829         int wanted;
1830
1831         while (1) {
1832                 wanted = atomic_read(&netstamp_wanted);
1833                 if (wanted <= 1)
1834                         break;
1835                 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
1836                         return;
1837         }
1838         atomic_dec(&netstamp_needed_deferred);
1839         schedule_work(&netstamp_work);
1840 #else
1841         static_branch_dec(&netstamp_needed_key);
1842 #endif
1843 }
1844 EXPORT_SYMBOL(net_disable_timestamp);
1845
1846 static inline void net_timestamp_set(struct sk_buff *skb)
1847 {
1848         skb->tstamp = 0;
1849         if (static_branch_unlikely(&netstamp_needed_key))
1850                 __net_timestamp(skb);
1851 }
1852
1853 #define net_timestamp_check(COND, SKB)                          \
1854         if (static_branch_unlikely(&netstamp_needed_key)) {     \
1855                 if ((COND) && !(SKB)->tstamp)                   \
1856                         __net_timestamp(SKB);                   \
1857         }                                                       \
1858
1859 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
1860 {
1861         unsigned int len;
1862
1863         if (!(dev->flags & IFF_UP))
1864                 return false;
1865
1866         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1867         if (skb->len <= len)
1868                 return true;
1869
1870         /* if TSO is enabled, we don't care about the length as the packet
1871          * could be forwarded without being segmented before
1872          */
1873         if (skb_is_gso(skb))
1874                 return true;
1875
1876         return false;
1877 }
1878 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1879
1880 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1881 {
1882         int ret = ____dev_forward_skb(dev, skb);
1883
1884         if (likely(!ret)) {
1885                 skb->protocol = eth_type_trans(skb, dev);
1886                 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1887         }
1888
1889         return ret;
1890 }
1891 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1892
1893 /**
1894  * dev_forward_skb - loopback an skb to another netif
1895  *
1896  * @dev: destination network device
1897  * @skb: buffer to forward
1898  *
1899  * return values:
1900  *      NET_RX_SUCCESS  (no congestion)
1901  *      NET_RX_DROP     (packet was dropped, but freed)
1902  *
1903  * dev_forward_skb can be used for injecting an skb from the
1904  * start_xmit function of one device into the receive queue
1905  * of another device.
1906  *
1907  * The receiving device may be in another namespace, so
1908  * we have to clear all information in the skb that could
1909  * impact namespace isolation.
1910  */
1911 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1912 {
1913         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1914 }
1915 EXPORT_SYMBOL_GPL(dev_forward_skb);
1916
1917 static inline int deliver_skb(struct sk_buff *skb,
1918                               struct packet_type *pt_prev,
1919                               struct net_device *orig_dev)
1920 {
1921         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1922                 return -ENOMEM;
1923         refcount_inc(&skb->users);
1924         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1925 }
1926
1927 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1928                                           struct packet_type **pt,
1929                                           struct net_device *orig_dev,
1930                                           __be16 type,
1931                                           struct list_head *ptype_list)
1932 {
1933         struct packet_type *ptype, *pt_prev = *pt;
1934
1935         list_for_each_entry_rcu(ptype, ptype_list, list) {
1936                 if (ptype->type != type)
1937                         continue;
1938                 if (pt_prev)
1939                         deliver_skb(skb, pt_prev, orig_dev);
1940                 pt_prev = ptype;
1941         }
1942         *pt = pt_prev;
1943 }
1944
1945 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1946 {
1947         if (!ptype->af_packet_priv || !skb->sk)
1948                 return false;
1949
1950         if (ptype->id_match)
1951                 return ptype->id_match(ptype, skb->sk);
1952         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1953                 return true;
1954
1955         return false;
1956 }
1957
1958 /*
1959  *      Support routine. Sends outgoing frames to any network
1960  *      taps currently in use.
1961  */
1962
1963 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1964 {
1965         struct packet_type *ptype;
1966         struct sk_buff *skb2 = NULL;
1967         struct packet_type *pt_prev = NULL;
1968         struct list_head *ptype_list = &ptype_all;
1969
1970         rcu_read_lock();
1971 again:
1972         list_for_each_entry_rcu(ptype, ptype_list, list) {
1973                 /* Never send packets back to the socket
1974                  * they originated from - MvS (miquels@drinkel.ow.org)
1975                  */
1976                 if (skb_loop_sk(ptype, skb))
1977                         continue;
1978
1979                 if (pt_prev) {
1980                         deliver_skb(skb2, pt_prev, skb->dev);
1981                         pt_prev = ptype;
1982                         continue;
1983                 }
1984
1985                 /* need to clone skb, done only once */
1986                 skb2 = skb_clone(skb, GFP_ATOMIC);
1987                 if (!skb2)
1988                         goto out_unlock;
1989
1990                 net_timestamp_set(skb2);
1991
1992                 /* skb->nh should be correctly
1993                  * set by sender, so that the second statement is
1994                  * just protection against buggy protocols.
1995                  */
1996                 skb_reset_mac_header(skb2);
1997
1998                 if (skb_network_header(skb2) < skb2->data ||
1999                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2000                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2001                                              ntohs(skb2->protocol),
2002                                              dev->name);
2003                         skb_reset_network_header(skb2);
2004                 }
2005
2006                 skb2->transport_header = skb2->network_header;
2007                 skb2->pkt_type = PACKET_OUTGOING;
2008                 pt_prev = ptype;
2009         }
2010
2011         if (ptype_list == &ptype_all) {
2012                 ptype_list = &dev->ptype_all;
2013                 goto again;
2014         }
2015 out_unlock:
2016         if (pt_prev) {
2017                 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2018                         pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2019                 else
2020                         kfree_skb(skb2);
2021         }
2022         rcu_read_unlock();
2023 }
2024 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
2025
2026 /**
2027  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2028  * @dev: Network device
2029  * @txq: number of queues available
2030  *
2031  * If real_num_tx_queues is changed the tc mappings may no longer be
2032  * valid. To resolve this verify the tc mapping remains valid and if
2033  * not NULL the mapping. With no priorities mapping to this
2034  * offset/count pair it will no longer be used. In the worst case TC0
2035  * is invalid nothing can be done so disable priority mappings. If is
2036  * expected that drivers will fix this mapping if they can before
2037  * calling netif_set_real_num_tx_queues.
2038  */
2039 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2040 {
2041         int i;
2042         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2043
2044         /* If TC0 is invalidated disable TC mapping */
2045         if (tc->offset + tc->count > txq) {
2046                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2047                 dev->num_tc = 0;
2048                 return;
2049         }
2050
2051         /* Invalidated prio to tc mappings set to TC0 */
2052         for (i = 1; i < TC_BITMASK + 1; i++) {
2053                 int q = netdev_get_prio_tc_map(dev, i);
2054
2055                 tc = &dev->tc_to_txq[q];
2056                 if (tc->offset + tc->count > txq) {
2057                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2058                                 i, q);
2059                         netdev_set_prio_tc_map(dev, i, 0);
2060                 }
2061         }
2062 }
2063
2064 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2065 {
2066         if (dev->num_tc) {
2067                 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2068                 int i;
2069
2070                 /* walk through the TCs and see if it falls into any of them */
2071                 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2072                         if ((txq - tc->offset) < tc->count)
2073                                 return i;
2074                 }
2075
2076                 /* didn't find it, just return -1 to indicate no match */
2077                 return -1;
2078         }
2079
2080         return 0;
2081 }
2082 EXPORT_SYMBOL(netdev_txq_to_tc);
2083
2084 #ifdef CONFIG_XPS
2085 struct static_key xps_needed __read_mostly;
2086 EXPORT_SYMBOL(xps_needed);
2087 struct static_key xps_rxqs_needed __read_mostly;
2088 EXPORT_SYMBOL(xps_rxqs_needed);
2089 static DEFINE_MUTEX(xps_map_mutex);
2090 #define xmap_dereference(P)             \
2091         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2092
2093 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2094                              int tci, u16 index)
2095 {
2096         struct xps_map *map = NULL;
2097         int pos;
2098
2099         if (dev_maps)
2100                 map = xmap_dereference(dev_maps->attr_map[tci]);
2101         if (!map)
2102                 return false;
2103
2104         for (pos = map->len; pos--;) {
2105                 if (map->queues[pos] != index)
2106                         continue;
2107
2108                 if (map->len > 1) {
2109                         map->queues[pos] = map->queues[--map->len];
2110                         break;
2111                 }
2112
2113                 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2114                 kfree_rcu(map, rcu);
2115                 return false;
2116         }
2117
2118         return true;
2119 }
2120
2121 static bool remove_xps_queue_cpu(struct net_device *dev,
2122                                  struct xps_dev_maps *dev_maps,
2123                                  int cpu, u16 offset, u16 count)
2124 {
2125         int num_tc = dev->num_tc ? : 1;
2126         bool active = false;
2127         int tci;
2128
2129         for (tci = cpu * num_tc; num_tc--; tci++) {
2130                 int i, j;
2131
2132                 for (i = count, j = offset; i--; j++) {
2133                         if (!remove_xps_queue(dev_maps, tci, j))
2134                                 break;
2135                 }
2136
2137                 active |= i < 0;
2138         }
2139
2140         return active;
2141 }
2142
2143 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2144                            struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2145                            u16 offset, u16 count, bool is_rxqs_map)
2146 {
2147         bool active = false;
2148         int i, j;
2149
2150         for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2151              j < nr_ids;)
2152                 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2153                                                count);
2154         if (!active) {
2155                 if (is_rxqs_map) {
2156                         RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2157                 } else {
2158                         RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2159
2160                         for (i = offset + (count - 1); count--; i--)
2161                                 netdev_queue_numa_node_write(
2162                                         netdev_get_tx_queue(dev, i),
2163                                                         NUMA_NO_NODE);
2164                 }
2165                 kfree_rcu(dev_maps, rcu);
2166         }
2167 }
2168
2169 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2170                                    u16 count)
2171 {
2172         const unsigned long *possible_mask = NULL;
2173         struct xps_dev_maps *dev_maps;
2174         unsigned int nr_ids;
2175
2176         if (!static_key_false(&xps_needed))
2177                 return;
2178
2179         mutex_lock(&xps_map_mutex);
2180
2181         if (static_key_false(&xps_rxqs_needed)) {
2182                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2183                 if (dev_maps) {
2184                         nr_ids = dev->num_rx_queues;
2185                         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2186                                        offset, count, true);
2187                 }
2188         }
2189
2190         dev_maps = xmap_dereference(dev->xps_cpus_map);
2191         if (!dev_maps)
2192                 goto out_no_maps;
2193
2194         if (num_possible_cpus() > 1)
2195                 possible_mask = cpumask_bits(cpu_possible_mask);
2196         nr_ids = nr_cpu_ids;
2197         clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2198                        false);
2199
2200 out_no_maps:
2201         if (static_key_enabled(&xps_rxqs_needed))
2202                 static_key_slow_dec(&xps_rxqs_needed);
2203
2204         static_key_slow_dec(&xps_needed);
2205         mutex_unlock(&xps_map_mutex);
2206 }
2207
2208 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2209 {
2210         netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2211 }
2212
2213 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2214                                       u16 index, bool is_rxqs_map)
2215 {
2216         struct xps_map *new_map;
2217         int alloc_len = XPS_MIN_MAP_ALLOC;
2218         int i, pos;
2219
2220         for (pos = 0; map && pos < map->len; pos++) {
2221                 if (map->queues[pos] != index)
2222                         continue;
2223                 return map;
2224         }
2225
2226         /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2227         if (map) {
2228                 if (pos < map->alloc_len)
2229                         return map;
2230
2231                 alloc_len = map->alloc_len * 2;
2232         }
2233
2234         /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2235          *  map
2236          */
2237         if (is_rxqs_map)
2238                 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2239         else
2240                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2241                                        cpu_to_node(attr_index));
2242         if (!new_map)
2243                 return NULL;
2244
2245         for (i = 0; i < pos; i++)
2246                 new_map->queues[i] = map->queues[i];
2247         new_map->alloc_len = alloc_len;
2248         new_map->len = pos;
2249
2250         return new_map;
2251 }
2252
2253 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2254                           u16 index, bool is_rxqs_map)
2255 {
2256         const unsigned long *online_mask = NULL, *possible_mask = NULL;
2257         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2258         int i, j, tci, numa_node_id = -2;
2259         int maps_sz, num_tc = 1, tc = 0;
2260         struct xps_map *map, *new_map;
2261         bool active = false;
2262         unsigned int nr_ids;
2263
2264         if (dev->num_tc) {
2265                 /* Do not allow XPS on subordinate device directly */
2266                 num_tc = dev->num_tc;
2267                 if (num_tc < 0)
2268                         return -EINVAL;
2269
2270                 /* If queue belongs to subordinate dev use its map */
2271                 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2272
2273                 tc = netdev_txq_to_tc(dev, index);
2274                 if (tc < 0)
2275                         return -EINVAL;
2276         }
2277
2278         mutex_lock(&xps_map_mutex);
2279         if (is_rxqs_map) {
2280                 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2281                 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2282                 nr_ids = dev->num_rx_queues;
2283         } else {
2284                 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2285                 if (num_possible_cpus() > 1) {
2286                         online_mask = cpumask_bits(cpu_online_mask);
2287                         possible_mask = cpumask_bits(cpu_possible_mask);
2288                 }
2289                 dev_maps = xmap_dereference(dev->xps_cpus_map);
2290                 nr_ids = nr_cpu_ids;
2291         }
2292
2293         if (maps_sz < L1_CACHE_BYTES)
2294                 maps_sz = L1_CACHE_BYTES;
2295
2296         /* allocate memory for queue storage */
2297         for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2298              j < nr_ids;) {
2299                 if (!new_dev_maps)
2300                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2301                 if (!new_dev_maps) {
2302                         mutex_unlock(&xps_map_mutex);
2303                         return -ENOMEM;
2304                 }
2305
2306                 tci = j * num_tc + tc;
2307                 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
2308                                  NULL;
2309
2310                 map = expand_xps_map(map, j, index, is_rxqs_map);
2311                 if (!map)
2312                         goto error;
2313
2314                 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2315         }
2316
2317         if (!new_dev_maps)
2318                 goto out_no_new_maps;
2319
2320         static_key_slow_inc(&xps_needed);
2321         if (is_rxqs_map)
2322                 static_key_slow_inc(&xps_rxqs_needed);
2323
2324         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2325              j < nr_ids;) {
2326                 /* copy maps belonging to foreign traffic classes */
2327                 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
2328                         /* fill in the new device map from the old device map */
2329                         map = xmap_dereference(dev_maps->attr_map[tci]);
2330                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2331                 }
2332
2333                 /* We need to explicitly update tci as prevous loop
2334                  * could break out early if dev_maps is NULL.
2335                  */
2336                 tci = j * num_tc + tc;
2337
2338                 if (netif_attr_test_mask(j, mask, nr_ids) &&
2339                     netif_attr_test_online(j, online_mask, nr_ids)) {
2340                         /* add tx-queue to CPU/rx-queue maps */
2341                         int pos = 0;
2342
2343                         map = xmap_dereference(new_dev_maps->attr_map[tci]);
2344                         while ((pos < map->len) && (map->queues[pos] != index))
2345                                 pos++;
2346
2347                         if (pos == map->len)
2348                                 map->queues[map->len++] = index;
2349 #ifdef CONFIG_NUMA
2350                         if (!is_rxqs_map) {
2351                                 if (numa_node_id == -2)
2352                                         numa_node_id = cpu_to_node(j);
2353                                 else if (numa_node_id != cpu_to_node(j))
2354                                         numa_node_id = -1;
2355                         }
2356 #endif
2357                 } else if (dev_maps) {
2358                         /* fill in the new device map from the old device map */
2359                         map = xmap_dereference(dev_maps->attr_map[tci]);
2360                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2361                 }
2362
2363                 /* copy maps belonging to foreign traffic classes */
2364                 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2365                         /* fill in the new device map from the old device map */
2366                         map = xmap_dereference(dev_maps->attr_map[tci]);
2367                         RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2368                 }
2369         }
2370
2371         if (is_rxqs_map)
2372                 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2373         else
2374                 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
2375
2376         /* Cleanup old maps */
2377         if (!dev_maps)
2378                 goto out_no_old_maps;
2379
2380         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2381              j < nr_ids;) {
2382                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2383                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2384                         map = xmap_dereference(dev_maps->attr_map[tci]);
2385                         if (map && map != new_map)
2386                                 kfree_rcu(map, rcu);
2387                 }
2388         }
2389
2390         kfree_rcu(dev_maps, rcu);
2391
2392 out_no_old_maps:
2393         dev_maps = new_dev_maps;
2394         active = true;
2395
2396 out_no_new_maps:
2397         if (!is_rxqs_map) {
2398                 /* update Tx queue numa node */
2399                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2400                                              (numa_node_id >= 0) ?
2401                                              numa_node_id : NUMA_NO_NODE);
2402         }
2403
2404         if (!dev_maps)
2405                 goto out_no_maps;
2406
2407         /* removes tx-queue from unused CPUs/rx-queues */
2408         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2409              j < nr_ids;) {
2410                 for (i = tc, tci = j * num_tc; i--; tci++)
2411                         active |= remove_xps_queue(dev_maps, tci, index);
2412                 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2413                     !netif_attr_test_online(j, online_mask, nr_ids))
2414                         active |= remove_xps_queue(dev_maps, tci, index);
2415                 for (i = num_tc - tc, tci++; --i; tci++)
2416                         active |= remove_xps_queue(dev_maps, tci, index);
2417         }
2418
2419         /* free map if not active */
2420         if (!active) {
2421                 if (is_rxqs_map)
2422                         RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2423                 else
2424                         RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2425                 kfree_rcu(dev_maps, rcu);
2426         }
2427
2428 out_no_maps:
2429         mutex_unlock(&xps_map_mutex);
2430
2431         return 0;
2432 error:
2433         /* remove any maps that we added */
2434         for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2435              j < nr_ids;) {
2436                 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2437                         new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2438                         map = dev_maps ?
2439                               xmap_dereference(dev_maps->attr_map[tci]) :
2440                               NULL;
2441                         if (new_map && new_map != map)
2442                                 kfree(new_map);
2443                 }
2444         }
2445
2446         mutex_unlock(&xps_map_mutex);
2447
2448         kfree(new_dev_maps);
2449         return -ENOMEM;
2450 }
2451
2452 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2453                         u16 index)
2454 {
2455         return __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2456 }
2457 EXPORT_SYMBOL(netif_set_xps_queue);
2458
2459 #endif
2460 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2461 {
2462         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2463
2464         /* Unbind any subordinate channels */
2465         while (txq-- != &dev->_tx[0]) {
2466                 if (txq->sb_dev)
2467                         netdev_unbind_sb_channel(dev, txq->sb_dev);
2468         }
2469 }
2470
2471 void netdev_reset_tc(struct net_device *dev)
2472 {
2473 #ifdef CONFIG_XPS
2474         netif_reset_xps_queues_gt(dev, 0);
2475 #endif
2476         netdev_unbind_all_sb_channels(dev);
2477
2478         /* Reset TC configuration of device */
2479         dev->num_tc = 0;
2480         memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2481         memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2482 }
2483 EXPORT_SYMBOL(netdev_reset_tc);
2484
2485 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2486 {
2487         if (tc >= dev->num_tc)
2488                 return -EINVAL;
2489
2490 #ifdef CONFIG_XPS
2491         netif_reset_xps_queues(dev, offset, count);
2492 #endif
2493         dev->tc_to_txq[tc].count = count;
2494         dev->tc_to_txq[tc].offset = offset;
2495         return 0;
2496 }
2497 EXPORT_SYMBOL(netdev_set_tc_queue);
2498
2499 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2500 {
2501         if (num_tc > TC_MAX_QUEUE)
2502                 return -EINVAL;
2503
2504 #ifdef CONFIG_XPS
2505         netif_reset_xps_queues_gt(dev, 0);
2506 #endif
2507         netdev_unbind_all_sb_channels(dev);
2508
2509         dev->num_tc = num_tc;
2510         return 0;
2511 }
2512 EXPORT_SYMBOL(netdev_set_num_tc);
2513
2514 void netdev_unbind_sb_channel(struct net_device *dev,
2515                               struct net_device *sb_dev)
2516 {
2517         struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2518
2519 #ifdef CONFIG_XPS
2520         netif_reset_xps_queues_gt(sb_dev, 0);
2521 #endif
2522         memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2523         memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2524
2525         while (txq-- != &dev->_tx[0]) {
2526                 if (txq->sb_dev == sb_dev)
2527                         txq->sb_dev = NULL;
2528         }
2529 }
2530 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2531
2532 int netdev_bind_sb_channel_queue(struct net_device *dev,
2533                                  struct net_device *sb_dev,
2534                                  u8 tc, u16 count, u16 offset)
2535 {
2536         /* Make certain the sb_dev and dev are already configured */
2537         if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2538                 return -EINVAL;
2539
2540         /* We cannot hand out queues we don't have */
2541         if ((offset + count) > dev->real_num_tx_queues)
2542                 return -EINVAL;
2543
2544         /* Record the mapping */
2545         sb_dev->tc_to_txq[tc].count = count;
2546         sb_dev->tc_to_txq[tc].offset = offset;
2547
2548         /* Provide a way for Tx queue to find the tc_to_txq map or
2549          * XPS map for itself.
2550          */
2551         while (count--)
2552                 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2553
2554         return 0;
2555 }
2556 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2557
2558 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2559 {
2560         /* Do not use a multiqueue device to represent a subordinate channel */
2561         if (netif_is_multiqueue(dev))
2562                 return -ENODEV;
2563
2564         /* We allow channels 1 - 32767 to be used for subordinate channels.
2565          * Channel 0 is meant to be "native" mode and used only to represent
2566          * the main root device. We allow writing 0 to reset the device back
2567          * to normal mode after being used as a subordinate channel.
2568          */
2569         if (channel > S16_MAX)
2570                 return -EINVAL;
2571
2572         dev->num_tc = -channel;
2573
2574         return 0;
2575 }
2576 EXPORT_SYMBOL(netdev_set_sb_channel);
2577
2578 /*
2579  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2580  * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
2581  */
2582 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2583 {
2584         bool disabling;
2585         int rc;
2586
2587         disabling = txq < dev->real_num_tx_queues;
2588
2589         if (txq < 1 || txq > dev->num_tx_queues)
2590                 return -EINVAL;
2591
2592         if (dev->reg_state == NETREG_REGISTERED ||
2593             dev->reg_state == NETREG_UNREGISTERING) {
2594                 ASSERT_RTNL();
2595
2596                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2597                                                   txq);
2598                 if (rc)
2599                         return rc;
2600
2601                 if (dev->num_tc)
2602                         netif_setup_tc(dev, txq);
2603
2604                 dev->real_num_tx_queues = txq;
2605
2606                 if (disabling) {
2607                         synchronize_net();
2608                         qdisc_reset_all_tx_gt(dev, txq);
2609 #ifdef CONFIG_XPS
2610                         netif_reset_xps_queues_gt(dev, txq);
2611 #endif
2612                 }
2613         } else {
2614                 dev->real_num_tx_queues = txq;
2615         }
2616
2617         return 0;
2618 }
2619 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2620
2621 #ifdef CONFIG_SYSFS
2622 /**
2623  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2624  *      @dev: Network device
2625  *      @rxq: Actual number of RX queues
2626  *
2627  *      This must be called either with the rtnl_lock held or before
2628  *      registration of the net device.  Returns 0 on success, or a
2629  *      negative error code.  If called before registration, it always
2630  *      succeeds.
2631  */
2632 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2633 {
2634         int rc;
2635
2636         if (rxq < 1 || rxq > dev->num_rx_queues)
2637                 return -EINVAL;
2638
2639         if (dev->reg_state == NETREG_REGISTERED) {
2640                 ASSERT_RTNL();
2641
2642                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2643                                                   rxq);
2644                 if (rc)
2645                         return rc;
2646         }
2647
2648         dev->real_num_rx_queues = rxq;
2649         return 0;
2650 }
2651 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2652 #endif
2653
2654 /**
2655  * netif_get_num_default_rss_queues - default number of RSS queues
2656  *
2657  * This routine should set an upper limit on the number of RSS queues
2658  * used by default by multiqueue devices.
2659  */
2660 int netif_get_num_default_rss_queues(void)
2661 {
2662         return is_kdump_kernel() ?
2663                 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2664 }
2665 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2666
2667 static void __netif_reschedule(struct Qdisc *q)
2668 {
2669         struct softnet_data *sd;
2670         unsigned long flags;
2671
2672         local_irq_save(flags);
2673         sd = this_cpu_ptr(&softnet_data);
2674         q->next_sched = NULL;
2675         *sd->output_queue_tailp = q;
2676         sd->output_queue_tailp = &q->next_sched;
2677         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2678         local_irq_restore(flags);
2679 }
2680
2681 void __netif_schedule(struct Qdisc *q)
2682 {
2683         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2684                 __netif_reschedule(q);
2685 }
2686 EXPORT_SYMBOL(__netif_schedule);
2687
2688 struct dev_kfree_skb_cb {
2689         enum skb_free_reason reason;
2690 };
2691
2692 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2693 {
2694         return (struct dev_kfree_skb_cb *)skb->cb;
2695 }
2696
2697 void netif_schedule_queue(struct netdev_queue *txq)
2698 {
2699         rcu_read_lock();
2700         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2701                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2702
2703                 __netif_schedule(q);
2704         }
2705         rcu_read_unlock();
2706 }
2707 EXPORT_SYMBOL(netif_schedule_queue);
2708
2709 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2710 {
2711         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2712                 struct Qdisc *q;
2713
2714                 rcu_read_lock();
2715                 q = rcu_dereference(dev_queue->qdisc);
2716                 __netif_schedule(q);
2717                 rcu_read_unlock();
2718         }
2719 }
2720 EXPORT_SYMBOL(netif_tx_wake_queue);
2721
2722 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2723 {
2724         unsigned long flags;
2725
2726         if (unlikely(!skb))
2727                 return;
2728
2729         if (likely(refcount_read(&skb->users) == 1)) {
2730                 smp_rmb();
2731                 refcount_set(&skb->users, 0);
2732         } else if (likely(!refcount_dec_and_test(&skb->users))) {
2733                 return;
2734         }
2735         get_kfree_skb_cb(skb)->reason = reason;
2736         local_irq_save(flags);
2737         skb->next = __this_cpu_read(softnet_data.completion_queue);
2738         __this_cpu_write(softnet_data.completion_queue, skb);
2739         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2740         local_irq_restore(flags);
2741 }
2742 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2743
2744 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2745 {
2746         if (in_irq() || irqs_disabled())
2747                 __dev_kfree_skb_irq(skb, reason);
2748         else
2749                 dev_kfree_skb(skb);
2750 }
2751 EXPORT_SYMBOL(__dev_kfree_skb_any);
2752
2753
2754 /**
2755  * netif_device_detach - mark device as removed
2756  * @dev: network device
2757  *
2758  * Mark device as removed from system and therefore no longer available.
2759  */
2760 void netif_device_detach(struct net_device *dev)
2761 {
2762         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2763             netif_running(dev)) {
2764                 netif_tx_stop_all_queues(dev);
2765         }
2766 }
2767 EXPORT_SYMBOL(netif_device_detach);
2768
2769 /**
2770  * netif_device_attach - mark device as attached
2771  * @dev: network device
2772  *
2773  * Mark device as attached from system and restart if needed.
2774  */
2775 void netif_device_attach(struct net_device *dev)
2776 {
2777         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2778             netif_running(dev)) {
2779                 netif_tx_wake_all_queues(dev);
2780                 __netdev_watchdog_up(dev);
2781         }
2782 }
2783 EXPORT_SYMBOL(netif_device_attach);
2784
2785 /*
2786  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
2787  * to be used as a distribution range.
2788  */
2789 static u16 skb_tx_hash(const struct net_device *dev,
2790                        const struct net_device *sb_dev,
2791                        struct sk_buff *skb)
2792 {
2793         u32 hash;
2794         u16 qoffset = 0;
2795         u16 qcount = dev->real_num_tx_queues;
2796
2797         if (dev->num_tc) {
2798                 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2799
2800                 qoffset = sb_dev->tc_to_txq[tc].offset;
2801                 qcount = sb_dev->tc_to_txq[tc].count;
2802         }
2803
2804         if (skb_rx_queue_recorded(skb)) {
2805                 hash = skb_get_rx_queue(skb);
2806                 while (unlikely(hash >= qcount))
2807                         hash -= qcount;
2808                 return hash + qoffset;
2809         }
2810
2811         return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2812 }
2813
2814 static void skb_warn_bad_offload(const struct sk_buff *skb)
2815 {
2816         static const netdev_features_t null_features;
2817         struct net_device *dev = skb->dev;
2818         const char *name = "";
2819
2820         if (!net_ratelimit())
2821                 return;
2822
2823         if (dev) {
2824                 if (dev->dev.parent)
2825                         name = dev_driver_string(dev->dev.parent);
2826                 else
2827                         name = netdev_name(dev);
2828         }
2829         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2830              "gso_type=%d ip_summed=%d\n",
2831              name, dev ? &dev->features : &null_features,
2832              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2833              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2834              skb_shinfo(skb)->gso_type, skb->ip_summed);
2835 }
2836
2837 /*
2838  * Invalidate hardware checksum when packet is to be mangled, and
2839  * complete checksum manually on outgoing path.
2840  */
2841 int skb_checksum_help(struct sk_buff *skb)
2842 {
2843         __wsum csum;
2844         int ret = 0, offset;
2845
2846         if (skb->ip_summed == CHECKSUM_COMPLETE)
2847                 goto out_set_summed;
2848
2849         if (unlikely(skb_shinfo(skb)->gso_size)) {
2850                 skb_warn_bad_offload(skb);
2851                 return -EINVAL;
2852         }
2853
2854         /* Before computing a checksum, we should make sure no frag could
2855          * be modified by an external entity : checksum could be wrong.
2856          */
2857         if (skb_has_shared_frag(skb)) {
2858                 ret = __skb_linearize(skb);
2859                 if (ret)
2860                         goto out;
2861         }
2862
2863         offset = skb_checksum_start_offset(skb);
2864         BUG_ON(offset >= skb_headlen(skb));
2865         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2866
2867         offset += skb->csum_offset;
2868         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2869
2870         if (skb_cloned(skb) &&
2871             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2872                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2873                 if (ret)
2874                         goto out;
2875         }
2876
2877         *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
2878 out_set_summed:
2879         skb->ip_summed = CHECKSUM_NONE;
2880 out:
2881         return ret;
2882 }
2883 EXPORT_SYMBOL(skb_checksum_help);
2884
2885 int skb_crc32c_csum_help(struct sk_buff *skb)
2886 {
2887         __le32 crc32c_csum;
2888         int ret = 0, offset, start;
2889
2890         if (skb->ip_summed != CHECKSUM_PARTIAL)
2891                 goto out;
2892
2893         if (unlikely(skb_is_gso(skb)))
2894                 goto out;
2895
2896         /* Before computing a checksum, we should make sure no frag could
2897          * be modified by an external entity : checksum could be wrong.
2898          */
2899         if (unlikely(skb_has_shared_frag(skb))) {
2900                 ret = __skb_linearize(skb);
2901                 if (ret)
2902                         goto out;
2903         }
2904         start = skb_checksum_start_offset(skb);
2905         offset = start + offsetof(struct sctphdr, checksum);
2906         if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
2907                 ret = -EINVAL;
2908                 goto out;
2909         }
2910         if (skb_cloned(skb) &&
2911             !skb_clone_writable(skb, offset + sizeof(__le32))) {
2912                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2913                 if (ret)
2914                         goto out;
2915         }
2916         crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
2917                                                   skb->len - start, ~(__u32)0,
2918                                                   crc32c_csum_stub));
2919         *(__le32 *)(skb->data + offset) = crc32c_csum;
2920         skb->ip_summed = CHECKSUM_NONE;
2921         skb->csum_not_inet = 0;
2922 out:
2923         return ret;
2924 }
2925
2926 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2927 {
2928         __be16 type = skb->protocol;
2929
2930         /* Tunnel gso handlers can set protocol to ethernet. */
2931         if (type == htons(ETH_P_TEB)) {
2932                 struct ethhdr *eth;
2933
2934                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2935                         return 0;
2936
2937                 eth = (struct ethhdr *)skb->data;
2938                 type = eth->h_proto;
2939         }
2940
2941         return __vlan_get_protocol(skb, type, depth);
2942 }
2943
2944 /**
2945  *      skb_mac_gso_segment - mac layer segmentation handler.
2946  *      @skb: buffer to segment
2947  *      @features: features for the output path (see dev->features)
2948  */
2949 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2950                                     netdev_features_t features)
2951 {
2952         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2953         struct packet_offload *ptype;
2954         int vlan_depth = skb->mac_len;
2955         __be16 type = skb_network_protocol(skb, &vlan_depth);
2956
2957         if (unlikely(!type))
2958                 return ERR_PTR(-EINVAL);
2959
2960         __skb_pull(skb, vlan_depth);
2961
2962         rcu_read_lock();
2963         list_for_each_entry_rcu(ptype, &offload_base, list) {
2964                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2965                         segs = ptype->callbacks.gso_segment(skb, features);
2966                         break;
2967                 }
2968         }
2969         rcu_read_unlock();
2970
2971         __skb_push(skb, skb->data - skb_mac_header(skb));
2972
2973         return segs;
2974 }
2975 EXPORT_SYMBOL(skb_mac_gso_segment);
2976
2977
2978 /* openvswitch calls this on rx path, so we need a different check.
2979  */
2980 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2981 {
2982         if (tx_path)
2983                 return skb->ip_summed != CHECKSUM_PARTIAL &&
2984                        skb->ip_summed != CHECKSUM_UNNECESSARY;
2985
2986         return skb->ip_summed == CHECKSUM_NONE;
2987 }
2988
2989 /**
2990  *      __skb_gso_segment - Perform segmentation on skb.
2991  *      @skb: buffer to segment
2992  *      @features: features for the output path (see dev->features)
2993  *      @tx_path: whether it is called in TX path
2994  *
2995  *      This function segments the given skb and returns a list of segments.
2996  *
2997  *      It may return NULL if the skb requires no segmentation.  This is
2998  *      only possible when GSO is used for verifying header integrity.
2999  *
3000  *      Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
3001  */
3002 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3003                                   netdev_features_t features, bool tx_path)
3004 {
3005         struct sk_buff *segs;
3006
3007         if (unlikely(skb_needs_check(skb, tx_path))) {
3008                 int err;
3009
3010                 /* We're going to init ->check field in TCP or UDP header */
3011                 err = skb_cow_head(skb, 0);
3012                 if (err < 0)
3013                         return ERR_PTR(err);
3014         }
3015
3016         /* Only report GSO partial support if it will enable us to
3017          * support segmentation on this frame without needing additional
3018          * work.
3019          */
3020         if (features & NETIF_F_GSO_PARTIAL) {
3021                 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3022                 struct net_device *dev = skb->dev;
3023
3024                 partial_features |= dev->features & dev->gso_partial_features;
3025                 if (!skb_gso_ok(skb, features | partial_features))
3026                         features &= ~NETIF_F_GSO_PARTIAL;
3027         }
3028
3029         BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
3030                      sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3031
3032         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3033         SKB_GSO_CB(skb)->encap_level = 0;
3034
3035         skb_reset_mac_header(skb);
3036         skb_reset_mac_len(skb);
3037
3038         segs = skb_mac_gso_segment(skb, features);
3039
3040         if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
3041                 skb_warn_bad_offload(skb);
3042
3043         return segs;
3044 }
3045 EXPORT_SYMBOL(__skb_gso_segment);
3046
3047 /* Take action when hardware reception checksum errors are detected. */
3048 #ifdef CONFIG_BUG
3049 void netdev_rx_csum_fault(struct net_device *dev)
3050 {
3051         if (net_ratelimit()) {
3052                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
3053                 dump_stack();
3054         }
3055 }
3056 EXPORT_SYMBOL(netdev_rx_csum_fault);
3057 #endif
3058
3059 /* XXX: check that highmem exists at all on the given machine. */
3060 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3061 {
3062 #ifdef CONFIG_HIGHMEM
3063         int i;
3064
3065         if (!(dev->features & NETIF_F_HIGHDMA)) {
3066                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3067                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3068
3069                         if (PageHighMem(skb_frag_page(frag)))
3070                                 return 1;
3071                 }
3072         }
3073 #endif
3074         return 0;
3075 }
3076
3077 /* If MPLS offload request, verify we are testing hardware MPLS features
3078  * instead of standard features for the netdev.
3079  */
3080 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3081 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3082                                            netdev_features_t features,
3083                                            __be16 type)
3084 {
3085         if (eth_p_mpls(type))
3086                 features &= skb->dev->mpls_features;
3087
3088         return features;
3089 }
3090 #else
3091 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3092                                            netdev_features_t features,
3093                                            __be16 type)
3094 {
3095         return features;
3096 }
3097 #endif
3098
3099 static netdev_features_t harmonize_features(struct sk_buff *skb,
3100         netdev_features_t features)
3101 {
3102         int tmp;
3103         __be16 type;
3104
3105         type = skb_network_protocol(skb, &tmp);
3106         features = net_mpls_features(skb, features, type);
3107
3108         if (skb->ip_summed != CHECKSUM_NONE &&
3109             !can_checksum_protocol(features, type)) {
3110                 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3111         }
3112         if (illegal_highdma(skb->dev, skb))
3113                 features &= ~NETIF_F_SG;
3114
3115         return features;
3116 }
3117
3118 netdev_features_t passthru_features_check(struct sk_buff *skb,
3119                                           struct net_device *dev,
3120                                           netdev_features_t features)
3121 {
3122         return features;
3123 }
3124 EXPORT_SYMBOL(passthru_features_check);
3125
3126 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3127                                              struct net_device *dev,
3128                                              netdev_features_t features)
3129 {
3130         return vlan_features_check(skb, features);
3131 }
3132
3133 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3134                                             struct net_device *dev,
3135                                             netdev_features_t features)
3136 {
3137         u16 gso_segs = skb_shinfo(skb)->gso_segs;
3138
3139         if (gso_segs > dev->gso_max_segs)
3140                 return features & ~NETIF_F_GSO_MASK;
3141
3142         /* Support for GSO partial features requires software
3143          * intervention before we can actually process the packets
3144          * so we need to strip support for any partial features now
3145          * and we can pull them back in after we have partially
3146          * segmented the frame.
3147          */
3148         if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3149                 features &= ~dev->gso_partial_features;
3150
3151         /* Make sure to clear the IPv4 ID mangling feature if the
3152          * IPv4 header has the potential to be fragmented.
3153          */
3154         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3155                 struct iphdr *iph = skb->encapsulation ?
3156                                     inner_ip_hdr(skb) : ip_hdr(skb);
3157
3158                 if (!(iph->frag_off & htons(IP_DF)))
3159                         features &= ~NETIF_F_TSO_MANGLEID;
3160         }
3161
3162         return features;
3163 }
3164
3165 netdev_features_t netif_skb_features(struct sk_buff *skb)
3166 {
3167         struct net_device *dev = skb->dev;
3168         netdev_features_t features = dev->features;
3169
3170         if (skb_is_gso(skb))
3171                 features = gso_features_check(skb, dev, features);
3172
3173         /* If encapsulation offload request, verify we are testing
3174          * hardware encapsulation features instead of standard
3175          * features for the netdev
3176          */
3177         if (skb->encapsulation)
3178                 features &= dev->hw_enc_features;
3179
3180         if (skb_vlan_tagged(skb))
3181                 features = netdev_intersect_features(features,
3182                                                      dev->vlan_features |
3183                                                      NETIF_F_HW_VLAN_CTAG_TX |
3184                                                      NETIF_F_HW_VLAN_STAG_TX);
3185
3186         if (dev->netdev_ops->ndo_features_check)
3187                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3188                                                                 features);
3189         else
3190                 features &= dflt_features_check(skb, dev, features);
3191
3192         return harmonize_features(skb, features);
3193 }
3194 EXPORT_SYMBOL(netif_skb_features);
3195
3196 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3197                     struct netdev_queue *txq, bool more)
3198 {
3199         unsigned int len;
3200         int rc;
3201
3202         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
3203                 dev_queue_xmit_nit(skb, dev);
3204
3205         len = skb->len;
3206         trace_net_dev_start_xmit(skb, dev);
3207         rc = netdev_start_xmit(skb, dev, txq, more);
3208         trace_net_dev_xmit(skb, rc, dev, len);
3209
3210         return rc;
3211 }
3212
3213 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3214                                     struct netdev_queue *txq, int *ret)
3215 {
3216         struct sk_buff *skb = first;
3217         int rc = NETDEV_TX_OK;
3218
3219         while (skb) {
3220                 struct sk_buff *next = skb->next;
3221
3222                 skb->next = NULL;
3223                 rc = xmit_one(skb, dev, txq, next != NULL);
3224                 if (unlikely(!dev_xmit_complete(rc))) {
3225                         skb->next = next;
3226                         goto out;
3227                 }
3228
3229                 skb = next;
3230                 if (netif_xmit_stopped(txq) && skb) {
3231                         rc = NETDEV_TX_BUSY;
3232                         break;
3233                 }
3234         }
3235
3236 out:
3237         *ret = rc;
3238         return skb;
3239 }
3240
3241 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3242                                           netdev_features_t features)
3243 {
3244         if (skb_vlan_tag_present(skb) &&
3245             !vlan_hw_offload_capable(features, skb->vlan_proto))
3246                 skb = __vlan_hwaccel_push_inside(skb);
3247         return skb;
3248 }
3249
3250 int skb_csum_hwoffload_help(struct sk_buff *skb,
3251                             const netdev_features_t features)
3252 {
3253         if (unlikely(skb->csum_not_inet))
3254                 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3255                         skb_crc32c_csum_help(skb);
3256
3257         return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb);
3258 }
3259 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3260
3261 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3262 {
3263         netdev_features_t features;
3264
3265         features = netif_skb_features(skb);
3266         skb = validate_xmit_vlan(skb, features);
3267         if (unlikely(!skb))
3268                 goto out_null;
3269
3270         skb = sk_validate_xmit_skb(skb, dev);
3271         if (unlikely(!skb))
3272                 goto out_null;
3273
3274         if (netif_needs_gso(skb, features)) {
3275                 struct sk_buff *segs;
3276
3277                 segs = skb_gso_segment(skb, features);
3278                 if (IS_ERR(segs)) {
3279                         goto out_kfree_skb;
3280                 } else if (segs) {
3281                         consume_skb(skb);
3282                         skb = segs;
3283                 }
3284         } else {
3285                 if (skb_needs_linearize(skb, features) &&
3286                     __skb_linearize(skb))
3287                         goto out_kfree_skb;
3288
3289                 /* If packet is not checksummed and device does not
3290                  * support checksumming for this protocol, complete
3291                  * checksumming here.
3292                  */
3293                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3294                         if (skb->encapsulation)
3295                                 skb_set_inner_transport_header(skb,
3296                                                                skb_checksum_start_offset(skb));
3297                         else
3298                                 skb_set_transport_header(skb,
3299                                                          skb_checksum_start_offset(skb));
3300                         if (skb_csum_hwoffload_help(skb, features))
3301                                 goto out_kfree_skb;
3302                 }
3303         }
3304
3305         skb = validate_xmit_xfrm(skb, features, again);
3306
3307         return skb;
3308
3309 out_kfree_skb:
3310         kfree_skb(skb);
3311 out_null:
3312         atomic_long_inc(&dev->tx_dropped);
3313         return NULL;
3314 }
3315
3316 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3317 {
3318         struct sk_buff *next, *head = NULL, *tail;
3319
3320         for (; skb != NULL; skb = next) {
3321                 next = skb->next;
3322                 skb->next = NULL;
3323
3324                 /* in case skb wont be segmented, point to itself */
3325                 skb->prev = skb;
3326
3327                 skb = validate_xmit_skb(skb, dev, again);
3328                 if (!skb)
3329                         continue;
3330
3331                 if (!head)
3332                         head = skb;
3333                 else
3334                         tail->next = skb;
3335                 /* If skb was segmented, skb->prev points to
3336                  * the last segment. If not, it still contains skb.
3337                  */
3338                 tail = skb->prev;
3339         }
3340         return head;
3341 }
3342 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3343
3344 static void qdisc_pkt_len_init(struct sk_buff *skb)
3345 {
3346         const struct skb_shared_info *shinfo = skb_shinfo(skb);
3347
3348         qdisc_skb_cb(skb)->pkt_len = skb->len;
3349
3350         /* To get more precise estimation of bytes sent on wire,
3351          * we add to pkt_len the headers size of all segments
3352          */
3353         if (shinfo->gso_size)  {
3354                 unsigned int hdr_len;
3355                 u16 gso_segs = shinfo->gso_segs;
3356
3357                 /* mac layer + network layer */
3358                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3359
3360                 /* + transport layer */
3361                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3362                         const struct tcphdr *th;
3363                         struct tcphdr _tcphdr;
3364
3365                         th = skb_header_pointer(skb, skb_transport_offset(skb),
3366                                                 sizeof(_tcphdr), &_tcphdr);
3367                         if (likely(th))
3368                                 hdr_len += __tcp_hdrlen(th);
3369                 } else {
3370                         struct udphdr _udphdr;
3371
3372                         if (skb_header_pointer(skb, skb_transport_offset(skb),
3373                                                sizeof(_udphdr), &_udphdr))
3374                                 hdr_len += sizeof(struct udphdr);
3375                 }
3376
3377                 if (shinfo->gso_type & SKB_GSO_DODGY)
3378                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3379                                                 shinfo->gso_size);
3380
3381                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3382         }
3383 }
3384
3385 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3386                                  struct net_device *dev,
3387                                  struct netdev_queue *txq)
3388 {
3389         spinlock_t *root_lock = qdisc_lock(q);
3390         struct sk_buff *to_free = NULL;
3391         bool contended;
3392         int rc;
3393
3394         qdisc_calculate_pkt_len(skb, q);
3395
3396         if (q->flags & TCQ_F_NOLOCK) {
3397                 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3398                         __qdisc_drop(skb, &to_free);
3399                         rc = NET_XMIT_DROP;
3400                 } else {
3401                         rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3402                         qdisc_run(q);
3403                 }
3404
3405                 if (unlikely(to_free))
3406                         kfree_skb_list(to_free);
3407                 return rc;
3408         }
3409
3410         /*
3411          * Heuristic to force contended enqueues to serialize on a
3412          * separate lock before trying to get qdisc main lock.
3413          * This permits qdisc->running owner to get the lock more
3414          * often and dequeue packets faster.
3415          */
3416         contended = qdisc_is_running(q);
3417         if (unlikely(contended))
3418                 spin_lock(&q->busylock);
3419
3420         spin_lock(root_lock);
3421         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3422                 __qdisc_drop(skb, &to_free);
3423                 rc = NET_XMIT_DROP;
3424         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3425                    qdisc_run_begin(q)) {
3426                 /*
3427                  * This is a work-conserving queue; there are no old skbs
3428                  * waiting to be sent out; and the qdisc is not running -
3429                  * xmit the skb directly.
3430                  */
3431
3432                 qdisc_bstats_update(q, skb);
3433
3434                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3435                         if (unlikely(contended)) {
3436                                 spin_unlock(&q->busylock);
3437                                 contended = false;
3438                         }
3439                         __qdisc_run(q);
3440                 }
3441
3442                 qdisc_run_end(q);
3443                 rc = NET_XMIT_SUCCESS;
3444         } else {
3445                 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3446                 if (qdisc_run_begin(q)) {
3447                         if (unlikely(contended)) {
3448                                 spin_unlock(&q->busylock);
3449                                 contended = false;
3450                         }
3451                         __qdisc_run(q);
3452                         qdisc_run_end(q);
3453                 }
3454         }
3455         spin_unlock(root_lock);
3456         if (unlikely(to_free))
3457                 kfree_skb_list(to_free);
3458         if (unlikely(contended))
3459                 spin_unlock(&q->busylock);
3460         return rc;
3461 }
3462
3463 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3464 static void skb_update_prio(struct sk_buff *skb)
3465 {
3466         const struct netprio_map *map;
3467         const struct sock *sk;
3468         unsigned int prioidx;
3469
3470         if (skb->priority)
3471                 return;
3472         map = rcu_dereference_bh(skb->dev->priomap);
3473         if (!map)
3474                 return;
3475         sk = skb_to_full_sk(skb);
3476         if (!sk)
3477                 return;
3478
3479         prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3480
3481         if (prioidx < map->priomap_len)
3482                 skb->priority = map->priomap[prioidx];
3483 }
3484 #else
3485 #define skb_update_prio(skb)
3486 #endif
3487
3488 DEFINE_PER_CPU(int, xmit_recursion);
3489 EXPORT_SYMBOL(xmit_recursion);
3490
3491 /**
3492  *      dev_loopback_xmit - loop back @skb
3493  *      @net: network namespace this loopback is happening in
3494  *      @sk:  sk needed to be a netfilter okfn
3495  *      @skb: buffer to transmit
3496  */
3497 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3498 {
3499         skb_reset_mac_header(skb);
3500         __skb_pull(skb, skb_network_offset(skb));
3501         skb->pkt_type = PACKET_LOOPBACK;
3502         skb->ip_summed = CHECKSUM_UNNECESSARY;
3503         WARN_ON(!skb_dst(skb));
3504         skb_dst_force(skb);
3505         netif_rx_ni(skb);
3506         return 0;
3507 }
3508 EXPORT_SYMBOL(dev_loopback_xmit);
3509
3510 #ifdef CONFIG_NET_EGRESS
3511 static struct sk_buff *
3512 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3513 {
3514         struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3515         struct tcf_result cl_res;
3516
3517         if (!miniq)
3518                 return skb;
3519
3520         /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
3521         mini_qdisc_bstats_cpu_update(miniq, skb);
3522
3523         switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3524         case TC_ACT_OK:
3525         case TC_ACT_RECLASSIFY:
3526                 skb->tc_index = TC_H_MIN(cl_res.classid);
3527                 break;
3528         case TC_ACT_SHOT:
3529                 mini_qdisc_qstats_cpu_drop(miniq);
3530                 *ret = NET_XMIT_DROP;
3531                 kfree_skb(skb);
3532                 return NULL;
3533         case TC_ACT_STOLEN:
3534         case TC_ACT_QUEUED:
3535         case TC_ACT_TRAP:
3536                 *ret = NET_XMIT_SUCCESS;
3537                 consume_skb(skb);
3538                 return NULL;
3539         case TC_ACT_REDIRECT:
3540                 /* No need to push/pop skb's mac_header here on egress! */
3541                 skb_do_redirect(skb);
3542                 *ret = NET_XMIT_SUCCESS;
3543                 return NULL;
3544         default:
3545                 break;
3546         }
3547
3548         return skb;
3549 }
3550 #endif /* CONFIG_NET_EGRESS */
3551
3552 #ifdef CONFIG_XPS
3553 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3554                                struct xps_dev_maps *dev_maps, unsigned int tci)
3555 {
3556         struct xps_map *map;
3557         int queue_index = -1;
3558
3559         if (dev->num_tc) {
3560                 tci *= dev->num_tc;
3561                 tci += netdev_get_prio_tc_map(dev, skb->priority);
3562         }
3563
3564         map = rcu_dereference(dev_maps->attr_map[tci]);
3565         if (map) {
3566                 if (map->len == 1)
3567                         queue_index = map->queues[0];
3568                 else
3569                         queue_index = map->queues[reciprocal_scale(
3570                                                 skb_get_hash(skb), map->len)];
3571                 if (unlikely(queue_index >= dev->real_num_tx_queues))
3572                         queue_index = -1;
3573         }
3574         return queue_index;
3575 }
3576 #endif
3577
3578 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3579                          struct sk_buff *skb)
3580 {
3581 #ifdef CONFIG_XPS
3582         struct xps_dev_maps *dev_maps;
3583         struct sock *sk = skb->sk;
3584         int queue_index = -1;
3585
3586         if (!static_key_false(&xps_needed))
3587                 return -1;
3588
3589         rcu_read_lock();
3590         if (!static_key_false(&xps_rxqs_needed))
3591                 goto get_cpus_map;
3592
3593         dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
3594         if (dev_maps) {
3595                 int tci = sk_rx_queue_get(sk);
3596
3597                 if (tci >= 0 && tci < dev->num_rx_queues)
3598                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3599                                                           tci);
3600         }
3601
3602 get_cpus_map:
3603         if (queue_index < 0) {
3604                 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3605                 if (dev_maps) {
3606                         unsigned int tci = skb->sender_cpu - 1;
3607
3608                         queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3609                                                           tci);
3610                 }
3611         }
3612         rcu_read_unlock();
3613
3614         return queue_index;
3615 #else
3616         return -1;
3617 #endif
3618 }
3619
3620 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
3621                      struct net_device *sb_dev,
3622                      select_queue_fallback_t fallback)
3623 {
3624         return 0;
3625 }
3626 EXPORT_SYMBOL(dev_pick_tx_zero);
3627
3628 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
3629                        struct net_device *sb_dev,
3630                        select_queue_fallback_t fallback)
3631 {
3632         return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
3633 }
3634 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
3635
3636 static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
3637                             struct net_device *sb_dev)
3638 {
3639         struct sock *sk = skb->sk;
3640         int queue_index = sk_tx_queue_get(sk);
3641
3642         sb_dev = sb_dev ? : dev;
3643
3644         if (queue_index < 0 || skb->ooo_okay ||
3645             queue_index >= dev->real_num_tx_queues) {
3646                 int new_index = get_xps_queue(dev, sb_dev, skb);
3647
3648                 if (new_index < 0)
3649                         new_index = skb_tx_hash(dev, sb_dev, skb);
3650
3651                 if (queue_index != new_index && sk &&
3652                     sk_fullsock(sk) &&
3653                     rcu_access_pointer(sk->sk_dst_cache))
3654                         sk_tx_queue_set(sk, new_index);
3655
3656                 queue_index = new_index;
3657         }
3658
3659         return queue_index;
3660 }
3661
3662 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3663                                     struct sk_buff *skb,
3664                                     struct net_device *sb_dev)
3665 {
3666         int queue_index = 0;
3667
3668 #ifdef CONFIG_XPS
3669         u32 sender_cpu = skb->sender_cpu - 1;
3670
3671         if (sender_cpu >= (u32)NR_CPUS)
3672                 skb->sender_cpu = raw_smp_processor_id() + 1;
3673 #endif
3674
3675         if (dev->real_num_tx_queues != 1) {
3676                 const struct net_device_ops *ops = dev->netdev_ops;
3677
3678                 if (ops->ndo_select_queue)
3679                         queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
3680                                                             __netdev_pick_tx);
3681                 else
3682                         queue_index = __netdev_pick_tx(dev, skb, sb_dev);
3683
3684                 queue_index = netdev_cap_txqueue(dev, queue_index);
3685         }
3686
3687         skb_set_queue_mapping(skb, queue_index);
3688         return netdev_get_tx_queue(dev, queue_index);
3689 }
3690
3691 /**
3692  *      __dev_queue_xmit - transmit a buffer
3693  *      @skb: buffer to transmit
3694  *      @sb_dev: suboordinate device used for L2 forwarding offload
3695  *
3696  *      Queue a buffer for transmission to a network device. The caller must
3697  *      have set the device and priority and built the buffer before calling
3698  *      this function. The function can be called from an interrupt.
3699  *
3700  *      A negative errno code is returned on a failure. A success does not
3701  *      guarantee the frame will be transmitted as it may be dropped due
3702  *      to congestion or traffic shaping.
3703  *
3704  * -----------------------------------------------------------------------------------
3705  *      I notice this method can also return errors from the queue disciplines,
3706  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
3707  *      be positive.
3708  *
3709  *      Regardless of the return value, the skb is consumed, so it is currently
3710  *      difficult to retry a send to this method.  (You can bump the ref count
3711  *      before sending to hold a reference for retry if you are careful.)
3712  *
3713  *      When calling this method, interrupts MUST be enabled.  This is because
3714  *      the BH enable code must have IRQs enabled so that it will not deadlock.
3715  *          --BLG
3716  */
3717 static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
3718 {
3719         struct net_device *dev = skb->dev;
3720         struct netdev_queue *txq;
3721         struct Qdisc *q;
3722         int rc = -ENOMEM;
3723         bool again = false;
3724
3725         skb_reset_mac_header(skb);
3726
3727         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3728                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3729
3730         /* Disable soft irqs for various locks below. Also
3731          * stops preemption for RCU.
3732          */
3733         rcu_read_lock_bh();
3734
3735         skb_update_prio(skb);
3736
3737         qdisc_pkt_len_init(skb);
3738 #ifdef CONFIG_NET_CLS_ACT
3739         skb->tc_at_ingress = 0;
3740 # ifdef CONFIG_NET_EGRESS
3741         if (static_branch_unlikely(&egress_needed_key)) {
3742                 skb = sch_handle_egress(skb, &rc, dev);
3743                 if (!skb)
3744                         goto out;
3745         }
3746 # endif
3747 #endif
3748         /* If device/qdisc don't need skb->dst, release it right now while
3749          * its hot in this cpu cache.
3750          */
3751         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3752                 skb_dst_drop(skb);
3753         else
3754                 skb_dst_force(skb);
3755
3756         txq = netdev_pick_tx(dev, skb, sb_dev);
3757         q = rcu_dereference_bh(txq->qdisc);
3758
3759         trace_net_dev_queue(skb);
3760         if (q->enqueue) {
3761                 rc = __dev_xmit_skb(skb, q, dev, txq);
3762                 goto out;
3763         }
3764
3765         /* The device has no queue. Common case for software devices:
3766          * loopback, all the sorts of tunnels...
3767
3768          * Really, it is unlikely that netif_tx_lock protection is necessary
3769          * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
3770          * counters.)
3771          * However, it is possible, that they rely on protection
3772          * made by us here.
3773
3774          * Check this and shot the lock. It is not prone from deadlocks.
3775          *Either shot noqueue qdisc, it is even simpler 8)
3776          */
3777         if (dev->flags & IFF_UP) {
3778                 int cpu = smp_processor_id(); /* ok because BHs are off */
3779
3780                 if (txq->xmit_lock_owner != cpu) {
3781                         if (unlikely(__this_cpu_read(xmit_recursion) >
3782                                      XMIT_RECURSION_LIMIT))
3783                                 goto recursion_alert;
3784
3785                         skb = validate_xmit_skb(skb, dev, &again);
3786                         if (!skb)
3787                                 goto out;
3788
3789                         HARD_TX_LOCK(dev, txq, cpu);
3790
3791                         if (!netif_xmit_stopped(txq)) {
3792                                 __this_cpu_inc(xmit_recursion);
3793                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
3794                                 __this_cpu_dec(xmit_recursion);
3795                                 if (dev_xmit_complete(rc)) {
3796                                         HARD_TX_UNLOCK(dev, txq);
3797                                         goto out;
3798                                 }
3799                         }
3800                         HARD_TX_UNLOCK(dev, txq);
3801                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3802                                              dev->name);
3803                 } else {
3804                         /* Recursion is detected! It is possible,
3805                          * unfortunately
3806                          */
3807 recursion_alert:
3808                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3809                                              dev->name);
3810                 }
3811         }
3812
3813         rc = -ENETDOWN;
3814         rcu_read_unlock_bh();
3815
3816         atomic_long_inc(&dev->tx_dropped);
3817         kfree_skb_list(skb);
3818         return rc;
3819 out:
3820         rcu_read_unlock_bh();
3821         return rc;
3822 }
3823
3824 int dev_queue_xmit(struct sk_buff *skb)
3825 {
3826         return __dev_queue_xmit(skb, NULL);
3827 }
3828 EXPORT_SYMBOL(dev_queue_xmit);
3829
3830 int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
3831 {
3832         return __dev_queue_xmit(skb, sb_dev);
3833 }
3834 EXPORT_SYMBOL(dev_queue_xmit_accel);
3835
3836 int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
3837 {
3838         struct net_device *dev = skb->dev;
3839         struct sk_buff *orig_skb = skb;
3840         struct netdev_queue *txq;
3841         int ret = NETDEV_TX_BUSY;
3842         bool again = false;
3843
3844         if (unlikely(!netif_running(dev) ||
3845                      !netif_carrier_ok(dev)))
3846                 goto drop;
3847
3848         skb = validate_xmit_skb_list(skb, dev, &again);
3849         if (skb != orig_skb)
3850                 goto drop;
3851
3852         skb_set_queue_mapping(skb, queue_id);
3853         txq = skb_get_tx_queue(dev, skb);
3854
3855         local_bh_disable();
3856
3857         HARD_TX_LOCK(dev, txq, smp_processor_id());
3858         if (!netif_xmit_frozen_or_drv_stopped(txq))
3859                 ret = netdev_start_xmit(skb, dev, txq, false);
3860         HARD_TX_UNLOCK(dev, txq);
3861
3862         local_bh_enable();
3863
3864         if (!dev_xmit_complete(ret))
3865                 kfree_skb(skb);
3866
3867         return ret;
3868 drop:
3869         atomic_long_inc(&dev->tx_dropped);
3870         kfree_skb_list(skb);
3871         return NET_XMIT_DROP;
3872 }
3873 EXPORT_SYMBOL(dev_direct_xmit);
3874
3875 /*************************************************************************
3876  *                      Receiver routines
3877  *************************************************************************/
3878
3879 int netdev_max_backlog __read_mostly = 1000;
3880 EXPORT_SYMBOL(netdev_max_backlog);
3881
3882 int netdev_tstamp_prequeue __read_mostly = 1;
3883 int netdev_budget __read_mostly = 300;
3884 unsigned int __read_mostly netdev_budget_usecs = 2000;
3885 int weight_p __read_mostly = 64;           /* old backlog weight */
3886 int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
3887 int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
3888 int dev_rx_weight __read_mostly = 64;
3889 int dev_tx_weight __read_mostly = 64;
3890
3891 /* Called with irq disabled */
3892 static inline void ____napi_schedule(struct softnet_data *sd,
3893                                      struct napi_struct *napi)
3894 {
3895         list_add_tail(&napi->poll_list, &sd->poll_list);
3896         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3897 }
3898
3899 #ifdef CONFIG_RPS
3900
3901 /* One global table that all flow-based protocols share. */
3902 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3903 EXPORT_SYMBOL(rps_sock_flow_table);
3904 u32 rps_cpu_mask __read_mostly;
3905 EXPORT_SYMBOL(rps_cpu_mask);
3906
3907 struct static_key rps_needed __read_mostly;
3908 EXPORT_SYMBOL(rps_needed);
3909 struct static_key rfs_needed __read_mostly;
3910 EXPORT_SYMBOL(rfs_needed);
3911
3912 static struct rps_dev_flow *
3913 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3914             struct rps_dev_flow *rflow, u16 next_cpu)
3915 {
3916         if (next_cpu < nr_cpu_ids) {
3917 #ifdef CONFIG_RFS_ACCEL
3918                 struct netdev_rx_queue *rxqueue;
3919                 struct rps_dev_flow_table *flow_table;
3920                 struct rps_dev_flow *old_rflow;
3921                 u32 flow_id;
3922                 u16 rxq_index;
3923                 int rc;
3924
3925                 /* Should we steer this flow to a different hardware queue? */
3926                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3927                     !(dev->features & NETIF_F_NTUPLE))
3928                         goto out;
3929                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3930                 if (rxq_index == skb_get_rx_queue(skb))
3931                         goto out;
3932
3933                 rxqueue = dev->_rx + rxq_index;
3934                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3935                 if (!flow_table)
3936                         goto out;
3937                 flow_id = skb_get_hash(skb) & flow_table->mask;
3938                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3939                                                         rxq_index, flow_id);
3940                 if (rc < 0)
3941                         goto out;
3942                 old_rflow = rflow;
3943                 rflow = &flow_table->flows[flow_id];
3944                 rflow->filter = rc;
3945                 if (old_rflow->filter == rflow->filter)
3946                         old_rflow->filter = RPS_NO_FILTER;
3947         out:
3948 #endif
3949                 rflow->last_qtail =
3950                         per_cpu(softnet_data, next_cpu).input_queue_head;
3951         }
3952
3953         rflow->cpu = next_cpu;
3954         return rflow;
3955 }
3956
3957 /*
3958  * get_rps_cpu is called from netif_receive_skb and returns the target
3959  * CPU from the RPS map of the receiving queue for a given skb.
3960  * rcu_read_lock must be held on entry.
3961  */
3962 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3963                        struct rps_dev_flow **rflowp)
3964 {
3965         const struct rps_sock_flow_table *sock_flow_table;
3966         struct netdev_rx_queue *rxqueue = dev->_rx;
3967         struct rps_dev_flow_table *flow_table;
3968         struct rps_map *map;
3969         int cpu = -1;
3970         u32 tcpu;
3971         u32 hash;
3972
3973         if (skb_rx_queue_recorded(skb)) {
3974                 u16 index = skb_get_rx_queue(skb);
3975
3976                 if (unlikely(index >= dev->real_num_rx_queues)) {
3977                         WARN_ONCE(dev->real_num_rx_queues > 1,
3978                                   "%s received packet on queue %u, but number "
3979                                   "of RX queues is %u\n",
3980                                   dev->name, index, dev->real_num_rx_queues);
3981                         goto done;
3982                 }
3983                 rxqueue += index;
3984         }
3985
3986         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3987
3988         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3989         map = rcu_dereference(rxqueue->rps_map);
3990         if (!flow_table && !map)
3991                 goto done;
3992
3993         skb_reset_network_header(skb);
3994         hash = skb_get_hash(skb);
3995         if (!hash)
3996                 goto done;
3997
3998         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3999         if (flow_table && sock_flow_table) {
4000                 struct rps_dev_flow *rflow;
4001                 u32 next_cpu;
4002                 u32 ident;
4003
4004                 /* First check into global flow table if there is a match */
4005                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4006                 if ((ident ^ hash) & ~rps_cpu_mask)
4007                         goto try_rps;
4008
4009                 next_cpu = ident & rps_cpu_mask;
4010
4011                 /* OK, now we know there is a match,
4012                  * we can look at the local (per receive queue) flow table
4013                  */
4014                 rflow = &flow_table->flows[hash & flow_table->mask];
4015                 tcpu = rflow->cpu;
4016
4017                 /*
4018                  * If the desired CPU (where last recvmsg was done) is
4019                  * different from current CPU (one in the rx-queue flow
4020                  * table entry), switch if one of the following holds:
4021                  *   - Current CPU is unset (>= nr_cpu_ids).
4022                  *   - Current CPU is offline.
4023                  *   - The current CPU's queue tail has advanced beyond the
4024                  *     last packet that was enqueued using this table entry.
4025                  *     This guarantees that all previous packets for the flow
4026                  *     have been dequeued, thus preserving in order delivery.
4027                  */
4028                 if (unlikely(tcpu != next_cpu) &&
4029                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4030                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4031                       rflow->last_qtail)) >= 0)) {
4032                         tcpu = next_cpu;
4033                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4034                 }
4035
4036                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4037                         *rflowp = rflow;
4038                         cpu = tcpu;
4039                         goto done;
4040                 }
4041         }
4042
4043 try_rps:
4044
4045         if (map) {
4046                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4047                 if (cpu_online(tcpu)) {
4048                         cpu = tcpu;
4049                         goto done;
4050                 }
4051         }
4052
4053 done:
4054         return cpu;
4055 }
4056
4057 #ifdef CONFIG_RFS_ACCEL
4058
4059 /**
4060  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4061  * @dev: Device on which the filter was set
4062  * @rxq_index: RX queue index
4063  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4064  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4065  *
4066  * Drivers that implement ndo_rx_flow_steer() should periodically call
4067  * this function for each installed filter and remove the filters for
4068  * which it returns %true.
4069  */
4070 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4071                          u32 flow_id, u16 filter_id)
4072 {
4073         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4074         struct rps_dev_flow_table *flow_table;
4075         struct rps_dev_flow *rflow;
4076         bool expire = true;
4077         unsigned int cpu;
4078
4079         rcu_read_lock();
4080         flow_table = rcu_dereference(rxqueue->rps_flow_table);
4081         if (flow_table && flow_id <= flow_table->mask) {
4082                 rflow = &flow_table->flows[flow_id];
4083                 cpu = READ_ONCE(rflow->cpu);
4084                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4085                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4086                            rflow->last_qtail) <
4087                      (int)(10 * flow_table->mask)))
4088                         expire = false;
4089         }
4090         rcu_read_unlock();
4091         return expire;
4092 }
4093 EXPORT_SYMBOL(rps_may_expire_flow);
4094
4095 #endif /* CONFIG_RFS_ACCEL */
4096
4097 /* Called from hardirq (IPI) context */
4098 static void rps_trigger_softirq(void *data)
4099 {
4100         struct softnet_data *sd = data;
4101
4102         ____napi_schedule(sd, &sd->backlog);
4103         sd->received_rps++;
4104 }
4105
4106 #endif /* CONFIG_RPS */
4107
4108 /*
4109  * Check if this softnet_data structure is another cpu one
4110  * If yes, queue it to our IPI list and return 1
4111  * If no, return 0
4112  */
4113 static int rps_ipi_queued(struct softnet_data *sd)
4114 {
4115 #ifdef CONFIG_RPS
4116         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4117
4118         if (sd != mysd) {
4119                 sd->rps_ipi_next = mysd->rps_ipi_list;
4120                 mysd->rps_ipi_list = sd;
4121
4122                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4123                 return 1;
4124         }
4125 #endif /* CONFIG_RPS */
4126         return 0;
4127 }
4128
4129 #ifdef CONFIG_NET_FLOW_LIMIT
4130 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4131 #endif
4132
4133 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4134 {
4135 #ifdef CONFIG_NET_FLOW_LIMIT
4136         struct sd_flow_limit *fl;
4137         struct softnet_data *sd;
4138         unsigned int old_flow, new_flow;
4139
4140         if (qlen < (netdev_max_backlog >> 1))
4141                 return false;
4142
4143         sd = this_cpu_ptr(&softnet_data);
4144
4145         rcu_read_lock();
4146         fl = rcu_dereference(sd->flow_limit);
4147         if (fl) {
4148                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4149                 old_flow = fl->history[fl->history_head];
4150                 fl->history[fl->history_head] = new_flow;
4151
4152                 fl->history_head++;
4153                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4154
4155                 if (likely(fl->buckets[old_flow]))
4156                         fl->buckets[old_flow]--;
4157
4158                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4159                         fl->count++;
4160                         rcu_read_unlock();
4161                         return true;
4162                 }
4163         }
4164         rcu_read_unlock();
4165 #endif
4166         return false;
4167 }
4168
4169 /*
4170  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4171  * queue (may be a remote CPU queue).
4172  */
4173 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4174                               unsigned int *qtail)
4175 {
4176         struct softnet_data *sd;
4177         unsigned long flags;
4178         unsigned int qlen;
4179
4180         sd = &per_cpu(softnet_data, cpu);
4181
4182         local_irq_save(flags);
4183
4184         rps_lock(sd);
4185         if (!netif_running(skb->dev))
4186                 goto drop;
4187         qlen = skb_queue_len(&sd->input_pkt_queue);
4188         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
4189                 if (qlen) {
4190 enqueue:
4191                         __skb_queue_tail(&sd->input_pkt_queue, skb);
4192                         input_queue_tail_incr_save(sd, qtail);
4193                         rps_unlock(sd);
4194                         local_irq_restore(flags);
4195                         return NET_RX_SUCCESS;
4196                 }
4197
4198                 /* Schedule NAPI for backlog device
4199                  * We can use non atomic operation since we own the queue lock
4200                  */
4201                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
4202                         if (!rps_ipi_queued(sd))
4203                                 ____napi_schedule(sd, &sd->backlog);
4204                 }
4205                 goto enqueue;
4206         }
4207
4208 drop:
4209         sd->dropped++;
4210         rps_unlock(sd);
4211
4212         local_irq_restore(flags);
4213
4214         atomic_long_inc(&skb->dev->rx_dropped);
4215         kfree_skb(skb);
4216         return NET_RX_DROP;
4217 }
4218
4219 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4220 {
4221         struct net_device *dev = skb->dev;
4222         struct netdev_rx_queue *rxqueue;
4223
4224         rxqueue = dev->_rx;
4225
4226         if (skb_rx_queue_recorded(skb)) {
4227                 u16 index = skb_get_rx_queue(skb);
4228
4229                 if (unlikely(index >= dev->real_num_rx_queues)) {
4230                         WARN_ONCE(dev->real_num_rx_queues > 1,
4231                                   "%s received packet on queue %u, but number "
4232                                   "of RX queues is %u\n",
4233                                   dev->name, index, dev->real_num_rx_queues);
4234
4235                         return rxqueue; /* Return first rxqueue */
4236                 }
4237                 rxqueue += index;
4238         }
4239         return rxqueue;
4240 }
4241
4242 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4243                                      struct xdp_buff *xdp,
4244                                      struct bpf_prog *xdp_prog)
4245 {
4246         struct netdev_rx_queue *rxqueue;
4247         void *orig_data, *orig_data_end;
4248         u32 metalen, act = XDP_DROP;
4249         int hlen, off;
4250         u32 mac_len;
4251
4252         /* Reinjected packets coming from act_mirred or similar should
4253          * not get XDP generic processing.
4254          */
4255         if (skb_cloned(skb))
4256                 return XDP_PASS;
4257
4258         /* XDP packets must be linear and must have sufficient headroom
4259          * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4260          * native XDP provides, thus we need to do it here as well.
4261          */
4262         if (skb_is_nonlinear(skb) ||
4263             skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4264                 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4265                 int troom = skb->tail + skb->data_len - skb->end;
4266
4267                 /* In case we have to go down the path and also linearize,
4268                  * then lets do the pskb_expand_head() work just once here.
4269                  */
4270                 if (pskb_expand_head(skb,
4271                                      hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4272                                      troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4273                         goto do_drop;
4274                 if (skb_linearize(skb))
4275                         goto do_drop;
4276         }
4277
4278         /* The XDP program wants to see the packet starting at the MAC
4279          * header.
4280          */
4281         mac_len = skb->data - skb_mac_header(skb);
4282         hlen = skb_headlen(skb) + mac_len;
4283         xdp->data = skb->data - mac_len;
4284         xdp->data_meta = xdp->data;
4285         xdp->data_end = xdp->data + hlen;
4286         xdp->data_hard_start = skb->data - skb_headroom(skb);
4287         orig_data_end = xdp->data_end;
4288         orig_data = xdp->data;
4289
4290         rxqueue = netif_get_rxqueue(skb);
4291         xdp->rxq = &rxqueue->xdp_rxq;
4292
4293         act = bpf_prog_run_xdp(xdp_prog, xdp);
4294
4295         off = xdp->data - orig_data;
4296         if (off > 0)
4297                 __skb_pull(skb, off);
4298         else if (off < 0)
4299                 __skb_push(skb, -off);
4300         skb->mac_header += off;
4301
4302         /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
4303          * pckt.
4304          */
4305         off = orig_data_end - xdp->data_end;
4306         if (off != 0) {
4307                 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4308                 skb->len -= off;
4309
4310         }
4311
4312         switch (act) {
4313         case XDP_REDIRECT:
4314         case XDP_TX:
4315                 __skb_push(skb, mac_len);
4316                 break;
4317         case XDP_PASS:
4318                 metalen = xdp->data - xdp->data_meta;
4319                 if (metalen)
4320                         skb_metadata_set(skb, metalen);
4321                 break;
4322         default:
4323                 bpf_warn_invalid_xdp_action(act);
4324                 /* fall through */
4325         case XDP_ABORTED:
4326                 trace_xdp_exception(skb->dev, xdp_prog, act);
4327                 /* fall through */
4328         case XDP_DROP:
4329         do_drop:
4330                 kfree_skb(skb);
4331                 break;
4332         }
4333
4334         return act;
4335 }
4336
4337 /* When doing generic XDP we have to bypass the qdisc layer and the
4338  * network taps in order to match in-driver-XDP behavior.
4339  */
4340 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4341 {
4342         struct net_device *dev = skb->dev;
4343         struct netdev_queue *txq;
4344         bool free_skb = true;
4345         int cpu, rc;
4346
4347         txq = netdev_pick_tx(dev, skb, NULL);
4348         cpu = smp_processor_id();
4349         HARD_TX_LOCK(dev, txq, cpu);
4350         if (!netif_xmit_stopped(txq)) {
4351                 rc = netdev_start_xmit(skb, dev, txq, 0);
4352                 if (dev_xmit_complete(rc))
4353                         free_skb = false;
4354         }
4355         HARD_TX_UNLOCK(dev, txq);
4356         if (free_skb) {
4357                 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4358                 kfree_skb(skb);
4359         }
4360 }
4361 EXPORT_SYMBOL_GPL(generic_xdp_tx);
4362
4363 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
4364
4365 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
4366 {
4367         if (xdp_prog) {
4368                 struct xdp_buff xdp;
4369                 u32 act;
4370                 int err;
4371
4372                 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
4373                 if (act != XDP_PASS) {
4374                         switch (act) {
4375                         case XDP_REDIRECT:
4376                                 err = xdp_do_generic_redirect(skb->dev, skb,
4377                                                               &xdp, xdp_prog);
4378                                 if (err)
4379                                         goto out_redir;
4380                                 break;
4381                         case XDP_TX:
4382                                 generic_xdp_tx(skb, xdp_prog);
4383                                 break;
4384                         }
4385                         return XDP_DROP;
4386                 }
4387         }
4388         return XDP_PASS;
4389 out_redir:
4390         kfree_skb(skb);
4391         return XDP_DROP;
4392 }
4393 EXPORT_SYMBOL_GPL(do_xdp_generic);
4394
4395 static int netif_rx_internal(struct sk_buff *skb)
4396 {
4397         int ret;
4398
4399         net_timestamp_check(netdev_tstamp_prequeue, skb);
4400
4401         trace_netif_rx(skb);
4402
4403         if (static_branch_unlikely(&generic_xdp_needed_key)) {
4404                 int ret;
4405
4406                 preempt_disable();
4407                 rcu_read_lock();
4408                 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
4409                 rcu_read_unlock();
4410                 preempt_enable();
4411
4412                 /* Consider XDP consuming the packet a success from
4413                  * the netdev point of view we do not want to count
4414                  * this as an error.
4415                  */
4416                 if (ret != XDP_PASS)
4417                         return NET_RX_SUCCESS;
4418         }
4419
4420 #ifdef CONFIG_RPS
4421         if (static_key_false(&rps_needed)) {
4422                 struct rps_dev_flow voidflow, *rflow = &voidflow;
4423                 int cpu;
4424
4425                 preempt_disable();
4426                 rcu_read_lock();
4427
4428                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4429                 if (cpu < 0)
4430                         cpu = smp_processor_id();
4431
4432                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4433
4434                 rcu_read_unlock();
4435                 preempt_enable();
4436         } else
4437 #endif
4438         {
4439                 unsigned int qtail;
4440
4441                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4442                 put_cpu();
4443         }
4444         return ret;
4445 }
4446
4447 /**
4448  *      netif_rx        -       post buffer to the network code
4449  *      @skb: buffer to post
4450  *
4451  *      This function receives a packet from a device driver and queues it for
4452  *      the upper (protocol) levels to process.  It always succeeds. The buffer
4453  *      may be dropped during processing for congestion control or by the
4454  *      protocol layers.
4455  *
4456  *      return values:
4457  *      NET_RX_SUCCESS  (no congestion)
4458  *      NET_RX_DROP     (packet was dropped)
4459  *
4460  */
4461
4462 int netif_rx(struct sk_buff *skb)
4463 {
4464         trace_netif_rx_entry(skb);
4465
4466         return netif_rx_internal(skb);
4467 }
4468 EXPORT_SYMBOL(netif_rx);
4469
4470 int netif_rx_ni(struct sk_buff *skb)
4471 {
4472         int err;
4473
4474         trace_netif_rx_ni_entry(skb);
4475
4476         preempt_disable();
4477         err = netif_rx_internal(skb);
4478         if (local_softirq_pending())
4479                 do_softirq();
4480         preempt_enable();
4481
4482         return err;
4483 }
4484 EXPORT_SYMBOL(netif_rx_ni);
4485
4486 static __latent_entropy void net_tx_action(struct softirq_action *h)
4487 {
4488         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4489
4490         if (sd->completion_queue) {
4491                 struct sk_buff *clist;
4492
4493                 local_irq_disable();
4494                 clist = sd->completion_queue;
4495                 sd->completion_queue = NULL;
4496                 local_irq_enable();
4497
4498                 while (clist) {
4499                         struct sk_buff *skb = clist;
4500
4501                         clist = clist->next;
4502
4503                         WARN_ON(refcount_read(&skb->users));
4504                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4505                                 trace_consume_skb(skb);
4506                         else
4507                                 trace_kfree_skb(skb, net_tx_action);
4508
4509                         if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4510                                 __kfree_skb(skb);
4511                         else
4512                                 __kfree_skb_defer(skb);
4513                 }
4514
4515                 __kfree_skb_flush();
4516         }
4517
4518         if (sd->output_queue) {
4519                 struct Qdisc *head;
4520
4521                 local_irq_disable();
4522                 head = sd->output_queue;
4523                 sd->output_queue = NULL;
4524                 sd->output_queue_tailp = &sd->output_queue;
4525                 local_irq_enable();
4526
4527                 while (head) {
4528                         struct Qdisc *q = head;
4529                         spinlock_t *root_lock = NULL;
4530
4531                         head = head->next_sched;
4532
4533                         if (!(q->flags & TCQ_F_NOLOCK)) {
4534                                 root_lock = qdisc_lock(q);
4535                                 spin_lock(root_lock);
4536                         }
4537                         /* We need to make sure head->next_sched is read
4538                          * before clearing __QDISC_STATE_SCHED
4539                          */
4540                         smp_mb__before_atomic();
4541                         clear_bit(__QDISC_STATE_SCHED, &q->state);
4542                         qdisc_run(q);
4543                         if (root_lock)
4544                                 spin_unlock(root_lock);
4545                 }
4546         }
4547
4548         xfrm_dev_backlog(sd);
4549 }
4550
4551 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4552 /* This hook is defined here for ATM LANE */
4553 int (*br_fdb_test_addr_hook)(struct net_device *dev,
4554                              unsigned char *addr) __read_mostly;
4555 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
4556 #endif
4557
4558 static inline struct sk_buff *
4559 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4560                    struct net_device *orig_dev)
4561 {
4562 #ifdef CONFIG_NET_CLS_ACT
4563         struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
4564         struct tcf_result cl_res;
4565
4566         /* If there's at least one ingress present somewhere (so
4567          * we get here via enabled static key), remaining devices
4568          * that are not configured with an ingress qdisc will bail
4569          * out here.
4570          */
4571         if (!miniq)
4572                 return skb;
4573
4574         if (*pt_prev) {
4575                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4576                 *pt_prev = NULL;
4577         }
4578
4579         qdisc_skb_cb(skb)->pkt_len = skb->len;
4580         skb->tc_at_ingress = 1;
4581         mini_qdisc_bstats_cpu_update(miniq, skb);
4582
4583         switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
4584         case TC_ACT_OK:
4585         case TC_ACT_RECLASSIFY:
4586                 skb->tc_index = TC_H_MIN(cl_res.classid);
4587                 break;
4588         case TC_ACT_SHOT:
4589                 mini_qdisc_qstats_cpu_drop(miniq);
4590                 kfree_skb(skb);
4591                 return NULL;
4592         case TC_ACT_STOLEN:
4593         case TC_ACT_QUEUED:
4594         case TC_ACT_TRAP:
4595                 consume_skb(skb);
4596                 return NULL;
4597         case TC_ACT_REDIRECT:
4598                 /* skb_mac_header check was done by cls/act_bpf, so
4599                  * we can safely push the L2 header back before
4600                  * redirecting to another netdev
4601                  */
4602                 __skb_push(skb, skb->mac_len);
4603                 skb_do_redirect(skb);
4604                 return NULL;
4605         default:
4606                 break;
4607         }
4608 #endif /* CONFIG_NET_CLS_ACT */
4609         return skb;
4610 }
4611
4612 /**
4613  *      netdev_is_rx_handler_busy - check if receive handler is registered
4614  *      @dev: device to check
4615  *
4616  *      Check if a receive handler is already registered for a given device.
4617  *      Return true if there one.
4618  *
4619  *      The caller must hold the rtnl_mutex.
4620  */
4621 bool netdev_is_rx_handler_busy(struct net_device *dev)
4622 {
4623         ASSERT_RTNL();
4624         return dev && rtnl_dereference(dev->rx_handler);
4625 }
4626 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
4627
4628 /**
4629  *      netdev_rx_handler_register - register receive handler
4630  *      @dev: device to register a handler for
4631  *      @rx_handler: receive handler to register
4632  *      @rx_handler_data: data pointer that is used by rx handler
4633  *
4634  *      Register a receive handler for a device. This handler will then be
4635  *      called from __netif_receive_skb. A negative errno code is returned
4636  *      on a failure.
4637  *
4638  *      The caller must hold the rtnl_mutex.
4639  *
4640  *      For a general description of rx_handler, see enum rx_handler_result.
4641  */
4642 int netdev_rx_handler_register(struct net_device *dev,
4643                                rx_handler_func_t *rx_handler,
4644                                void *rx_handler_data)
4645 {
4646         if (netdev_is_rx_handler_busy(dev))
4647                 return -EBUSY;
4648
4649         if (dev->priv_flags & IFF_NO_RX_HANDLER)
4650                 return -EINVAL;
4651
4652         /* Note: rx_handler_data must be set before rx_handler */
4653         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
4654         rcu_assign_pointer(dev->rx_handler, rx_handler);
4655
4656         return 0;
4657 }
4658 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
4659
4660 /**
4661  *      netdev_rx_handler_unregister - unregister receive handler
4662  *      @dev: device to unregister a handler from
4663  *
4664  *      Unregister a receive handler from a device.
4665  *
4666  *      The caller must hold the rtnl_mutex.
4667  */
4668 void netdev_rx_handler_unregister(struct net_device *dev)
4669 {
4670
4671         ASSERT_RTNL();
4672         RCU_INIT_POINTER(dev->rx_handler, NULL);
4673         /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
4674          * section has a guarantee to see a non NULL rx_handler_data
4675          * as well.
4676          */
4677         synchronize_net();
4678         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
4679 }
4680 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
4681
4682 /*
4683  * Limit the use of PFMEMALLOC reserves to those protocols that implement
4684  * the special handling of PFMEMALLOC skbs.
4685  */
4686 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
4687 {
4688         switch (skb->protocol) {
4689         case htons(ETH_P_ARP):
4690         case htons(ETH_P_IP):
4691         case htons(ETH_P_IPV6):
4692         case htons(ETH_P_8021Q):
4693         case htons(ETH_P_8021AD):
4694                 return true;
4695         default:
4696                 return false;
4697         }
4698 }
4699
4700 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4701                              int *ret, struct net_device *orig_dev)
4702 {
4703 #ifdef CONFIG_NETFILTER_INGRESS
4704         if (nf_hook_ingress_active(skb)) {
4705                 int ingress_retval;
4706
4707                 if (*pt_prev) {
4708                         *ret = deliver_skb(skb, *pt_prev, orig_dev);
4709                         *pt_prev = NULL;
4710                 }
4711
4712                 rcu_read_lock();
4713                 ingress_retval = nf_hook_ingress(skb);
4714                 rcu_read_unlock();
4715                 return ingress_retval;
4716         }
4717 #endif /* CONFIG_NETFILTER_INGRESS */
4718         return 0;
4719 }
4720
4721 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
4722                                     struct packet_type **ppt_prev)
4723 {
4724         struct packet_type *ptype, *pt_prev;
4725         rx_handler_func_t *rx_handler;
4726         struct net_device *orig_dev;
4727         bool deliver_exact = false;
4728         int ret = NET_RX_DROP;
4729         __be16 type;
4730
4731         net_timestamp_check(!netdev_tstamp_prequeue, skb);
4732
4733         trace_netif_receive_skb(skb);
4734
4735         orig_dev = skb->dev;
4736
4737         skb_reset_network_header(skb);
4738         if (!skb_transport_header_was_set(skb))
4739                 skb_reset_transport_header(skb);
4740         skb_reset_mac_len(skb);
4741
4742         pt_prev = NULL;
4743
4744 another_round:
4745         skb->skb_iif = skb->dev->ifindex;
4746
4747         __this_cpu_inc(softnet_data.processed);
4748
4749         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4750             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
4751                 skb = skb_vlan_untag(skb);
4752                 if (unlikely(!skb))
4753                         goto out;
4754         }
4755
4756         if (skb_skip_tc_classify(skb))
4757                 goto skip_classify;
4758
4759         if (pfmemalloc)
4760                 goto skip_taps;
4761
4762         list_for_each_entry_rcu(ptype, &ptype_all, list) {
4763                 if (pt_prev)
4764                         ret = deliver_skb(skb, pt_prev, orig_dev);
4765                 pt_prev = ptype;
4766         }
4767
4768         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4769                 if (pt_prev)
4770                         ret = deliver_skb(skb, pt_prev, orig_dev);
4771                 pt_prev = ptype;
4772         }
4773
4774 skip_taps:
4775 #ifdef CONFIG_NET_INGRESS
4776         if (static_branch_unlikely(&ingress_needed_key)) {
4777                 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4778                 if (!skb)
4779                         goto out;
4780
4781                 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
4782                         goto out;
4783         }
4784 #endif
4785         skb_reset_tc(skb);
4786 skip_classify:
4787         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4788                 goto drop;
4789
4790         if (skb_vlan_tag_present(skb)) {
4791                 if (pt_prev) {
4792                         ret = deliver_skb(skb, pt_prev, orig_dev);
4793                         pt_prev = NULL;
4794                 }
4795                 if (vlan_do_receive(&skb))
4796                         goto another_round;
4797                 else if (unlikely(!skb))
4798                         goto out;
4799         }
4800
4801         rx_handler = rcu_dereference(skb->dev->rx_handler);
4802         if (rx_handler) {
4803                 if (pt_prev) {
4804                         ret = deliver_skb(skb, pt_prev, orig_dev);
4805                         pt_prev = NULL;
4806                 }
4807                 switch (rx_handler(&skb)) {
4808                 case RX_HANDLER_CONSUMED:
4809                         ret = NET_RX_SUCCESS;
4810                         goto out;
4811                 case RX_HANDLER_ANOTHER:
4812                         goto another_round;
4813                 case RX_HANDLER_EXACT:
4814                         deliver_exact = true;
4815                 case RX_HANDLER_PASS:
4816                         break;
4817                 default:
4818                         BUG();
4819                 }
4820         }
4821
4822         if (unlikely(skb_vlan_tag_present(skb))) {
4823                 if (skb_vlan_tag_get_id(skb))
4824                         skb->pkt_type = PACKET_OTHERHOST;
4825                 /* Note: we might in the future use prio bits
4826                  * and set skb->priority like in vlan_do_receive()
4827                  * For the time being, just ignore Priority Code Point
4828                  */
4829                 skb->vlan_tci = 0;
4830         }
4831
4832         type = skb->protocol;
4833
4834         /* deliver only exact match when indicated */
4835         if (likely(!deliver_exact)) {
4836                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4837                                        &ptype_base[ntohs(type) &
4838                                                    PTYPE_HASH_MASK]);
4839         }
4840
4841         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4842                                &orig_dev->ptype_specific);
4843
4844         if (unlikely(skb->dev != orig_dev)) {
4845                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4846                                        &skb->dev->ptype_specific);
4847         }
4848
4849         if (pt_prev) {
4850                 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
4851                         goto drop;
4852                 *ppt_prev = pt_prev;
4853         } else {
4854 drop:
4855                 if (!deliver_exact)
4856                         atomic_long_inc(&skb->dev->rx_dropped);
4857                 else
4858                         atomic_long_inc(&skb->dev->rx_nohandler);
4859                 kfree_skb(skb);
4860                 /* Jamal, now you will not able to escape explaining
4861                  * me how you were going to use this. :-)
4862                  */
4863                 ret = NET_RX_DROP;
4864         }
4865
4866 out:
4867         return ret;
4868 }
4869
4870 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
4871 {
4872         struct net_device *orig_dev = skb->dev;
4873         struct packet_type *pt_prev = NULL;
4874         int ret;
4875
4876         ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4877         if (pt_prev)
4878                 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4879         return ret;
4880 }
4881
4882 /**
4883  *      netif_receive_skb_core - special purpose version of netif_receive_skb
4884  *      @skb: buffer to process
4885  *
4886  *      More direct receive version of netif_receive_skb().  It should
4887  *      only be used by callers that have a need to skip RPS and Generic XDP.
4888  *      Caller must also take care of handling if (page_is_)pfmemalloc.
4889  *
4890  *      This function may only be called from softirq context and interrupts
4891  *      should be enabled.
4892  *
4893  *      Return values (usually ignored):
4894  *      NET_RX_SUCCESS: no congestion
4895  *      NET_RX_DROP: packet was dropped
4896  */
4897 int netif_receive_skb_core(struct sk_buff *skb)
4898 {
4899         int ret;
4900
4901         rcu_read_lock();
4902         ret = __netif_receive_skb_one_core(skb, false);
4903         rcu_read_unlock();
4904
4905         return ret;
4906 }
4907 EXPORT_SYMBOL(netif_receive_skb_core);
4908
4909 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
4910                                                   struct packet_type *pt_prev,
4911                                                   struct net_device *orig_dev)
4912 {
4913         struct sk_buff *skb, *next;
4914
4915         if (!pt_prev)
4916                 return;
4917         if (list_empty(head))
4918                 return;
4919         if (pt_prev->list_func != NULL)
4920                 pt_prev->list_func(head, pt_prev, orig_dev);
4921         else
4922                 list_for_each_entry_safe(skb, next, head, list)
4923                         pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
4924 }
4925
4926 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
4927 {
4928         /* Fast-path assumptions:
4929          * - There is no RX handler.
4930          * - Only one packet_type matches.
4931          * If either of these fails, we will end up doing some per-packet
4932          * processing in-line, then handling the 'last ptype' for the whole
4933          * sublist.  This can't cause out-of-order delivery to any single ptype,
4934          * because the 'last ptype' must be constant across the sublist, and all
4935          * other ptypes are handled per-packet.
4936          */
4937         /* Current (common) ptype of sublist */
4938         struct packet_type *pt_curr = NULL;
4939         /* Current (common) orig_dev of sublist */
4940         struct net_device *od_curr = NULL;
4941         struct list_head sublist;
4942         struct sk_buff *skb, *next;
4943
4944         INIT_LIST_HEAD(&sublist);
4945         list_for_each_entry_safe(skb, next, head, list) {
4946                 struct net_device *orig_dev = skb->dev;
4947                 struct packet_type *pt_prev = NULL;
4948
4949                 list_del(&skb->list);
4950                 __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
4951                 if (!pt_prev)
4952                         continue;
4953                 if (pt_curr != pt_prev || od_curr != orig_dev) {
4954                         /* dispatch old sublist */
4955                         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4956                         /* start new sublist */
4957                         INIT_LIST_HEAD(&sublist);
4958                         pt_curr = pt_prev;
4959                         od_curr = orig_dev;
4960                 }
4961                 list_add_tail(&skb->list, &sublist);
4962         }
4963
4964         /* dispatch final sublist */
4965         __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4966 }
4967
4968 static int __netif_receive_skb(struct sk_buff *skb)
4969 {
4970         int ret;
4971
4972         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4973                 unsigned int noreclaim_flag;
4974
4975                 /*
4976                  * PFMEMALLOC skbs are special, they should
4977                  * - be delivered to SOCK_MEMALLOC sockets only
4978                  * - stay away from userspace
4979                  * - have bounded memory usage
4980                  *
4981                  * Use PF_MEMALLOC as this saves us from propagating the allocation
4982                  * context down to all allocation sites.
4983                  */
4984                 noreclaim_flag = memalloc_noreclaim_save();
4985                 ret = __netif_receive_skb_one_core(skb, true);
4986                 memalloc_noreclaim_restore(noreclaim_flag);
4987         } else
4988                 ret = __netif_receive_skb_one_core(skb, false);
4989
4990         return ret;
4991 }
4992
4993 static void __netif_receive_skb_list(struct list_head *head)
4994 {
4995         unsigned long noreclaim_flag = 0;
4996         struct sk_buff *skb, *next;
4997         bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
4998
4999         list_for_each_entry_safe(skb, next, head, list) {
5000                 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5001                         struct list_head sublist;
5002
5003                         /* Handle the previous sublist */
5004                         list_cut_before(&sublist, head, &skb->list);
5005                         if (!list_empty(&sublist))
5006                                 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5007                         pfmemalloc = !pfmemalloc;
5008                         /* See comments in __netif_receive_skb */
5009                         if (pfmemalloc)
5010                                 noreclaim_flag = memalloc_noreclaim_save();
5011                         else
5012                                 memalloc_noreclaim_restore(noreclaim_flag);
5013                 }
5014         }
5015         /* Handle the remaining sublist */
5016         if (!list_empty(head))
5017                 __netif_receive_skb_list_core(head, pfmemalloc);
5018         /* Restore pflags */
5019         if (pfmemalloc)
5020                 memalloc_noreclaim_restore(noreclaim_flag);
5021 }
5022
5023 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5024 {
5025         struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5026         struct bpf_prog *new = xdp->prog;
5027         int ret = 0;
5028
5029         switch (xdp->command) {
5030         case XDP_SETUP_PROG:
5031                 rcu_assign_pointer(dev->xdp_prog, new);
5032                 if (old)
5033                         bpf_prog_put(old);
5034
5035                 if (old && !new) {
5036                         static_branch_dec(&generic_xdp_needed_key);
5037                 } else if (new && !old) {
5038                         static_branch_inc(&generic_xdp_needed_key);
5039                         dev_disable_lro(dev);
5040                         dev_disable_gro_hw(dev);
5041                 }
5042                 break;
5043
5044         case XDP_QUERY_PROG:
5045                 xdp->prog_id = old ? old->aux->id : 0;
5046                 break;
5047
5048         default:
5049                 ret = -EINVAL;
5050                 break;
5051         }
5052
5053         return ret;
5054 }
5055
5056 static int netif_receive_skb_internal(struct sk_buff *skb)
5057 {
5058         int ret;
5059
5060         net_timestamp_check(netdev_tstamp_prequeue, skb);
5061
5062         if (skb_defer_rx_timestamp(skb))
5063                 return NET_RX_SUCCESS;
5064
5065         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5066                 int ret;
5067
5068                 preempt_disable();
5069                 rcu_read_lock();
5070                 ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5071                 rcu_read_unlock();
5072                 preempt_enable();
5073
5074                 if (ret != XDP_PASS)
5075                         return NET_RX_DROP;
5076         }
5077
5078         rcu_read_lock();
5079 #ifdef CONFIG_RPS
5080         if (static_key_false(&rps_needed)) {
5081                 struct rps_dev_flow voidflow, *rflow = &voidflow;
5082                 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5083
5084                 if (cpu >= 0) {
5085                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5086                         rcu_read_unlock();
5087                         return ret;
5088                 }
5089         }
5090 #endif
5091         ret = __netif_receive_skb(skb);
5092         rcu_read_unlock();
5093         return ret;
5094 }
5095
5096 static void netif_receive_skb_list_internal(struct list_head *head)
5097 {
5098         struct bpf_prog *xdp_prog = NULL;
5099         struct sk_buff *skb, *next;
5100         struct list_head sublist;
5101
5102         INIT_LIST_HEAD(&sublist);
5103         list_for_each_entry_safe(skb, next, head, list) {
5104                 net_timestamp_check(netdev_tstamp_prequeue, skb);
5105                 list_del(&skb->list);
5106                 if (!skb_defer_rx_timestamp(skb))
5107                         list_add_tail(&skb->list, &sublist);
5108         }
5109         list_splice_init(&sublist, head);
5110
5111         if (static_branch_unlikely(&generic_xdp_needed_key)) {
5112                 preempt_disable();
5113                 rcu_read_lock();
5114                 list_for_each_entry_safe(skb, next, head, list) {
5115                         xdp_prog = rcu_dereference(skb->dev->xdp_prog);
5116                         list_del(&skb->list);
5117                         if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
5118                                 list_add_tail(&skb->list, &sublist);
5119                 }
5120                 rcu_read_unlock();
5121                 preempt_enable();
5122                 /* Put passed packets back on main list */
5123                 list_splice_init(&sublist, head);
5124         }
5125
5126         rcu_read_lock();
5127 #ifdef CONFIG_RPS
5128         if (static_key_false(&rps_needed)) {
5129                 list_for_each_entry_safe(skb, next, head, list) {
5130                         struct rps_dev_flow voidflow, *rflow = &voidflow;
5131                         int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5132
5133                         if (cpu >= 0) {
5134                                 /* Will be handled, remove from list */
5135                                 list_del(&skb->list);
5136                                 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5137                         }
5138                 }
5139         }
5140 #endif
5141         __netif_receive_skb_list(head);
5142         rcu_read_unlock();
5143 }
5144
5145 /**
5146  *      netif_receive_skb - process receive buffer from network
5147  *      @skb: buffer to process
5148  *
5149  *      netif_receive_skb() is the main receive data processing function.
5150  *      It always succeeds. The buffer may be dropped during processing
5151  *      for congestion control or by the protocol layers.
5152  *
5153  *      This function may only be called from softirq context and interrupts
5154  *      should be enabled.
5155  *
5156  *      Return values (usually ignored):
5157  *      NET_RX_SUCCESS: no congestion
5158  *      NET_RX_DROP: packet was dropped
5159  */
5160 int netif_receive_skb(struct sk_buff *skb)
5161 {
5162         trace_netif_receive_skb_entry(skb);
5163
5164         return netif_receive_skb_internal(skb);
5165 }
5166 EXPORT_SYMBOL(netif_receive_skb);
5167
5168 /**
5169  *      netif_receive_skb_list - process many receive buffers from network
5170  *      @head: list of skbs to process.
5171  *
5172  *      Since return value of netif_receive_skb() is normally ignored, and
5173  *      wouldn't be meaningful for a list, this function returns void.
5174  *
5175  *      This function may only be called from softirq context and interrupts
5176  *      should be enabled.
5177  */
5178 void netif_receive_skb_list(struct list_head *head)
5179 {
5180         struct sk_buff *skb;
5181
5182         if (list_empty(head))
5183                 return;
5184         list_for_each_entry(skb, head, list)
5185                 trace_netif_receive_skb_list_entry(skb);
5186         netif_receive_skb_list_internal(head);
5187 }
5188 EXPORT_SYMBOL(netif_receive_skb_list);
5189
5190 DEFINE_PER_CPU(struct work_struct, flush_works);
5191
5192 /* Network device is going away, flush any packets still pending */
5193 static void flush_backlog(struct work_struct *work)
5194 {
5195         struct sk_buff *skb, *tmp;
5196         struct softnet_data *sd;
5197
5198         local_bh_disable();
5199         sd = this_cpu_ptr(&softnet_data);
5200
5201         local_irq_disable();
5202         rps_lock(sd);
5203         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5204                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5205                         __skb_unlink(skb, &sd->input_pkt_queue);
5206                         kfree_skb(skb);
5207                         input_queue_head_incr(sd);
5208                 }
5209         }
5210         rps_unlock(sd);
5211         local_irq_enable();
5212
5213         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5214                 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5215                         __skb_unlink(skb, &sd->process_queue);
5216                         kfree_skb(skb);
5217                         input_queue_head_incr(sd);
5218                 }
5219         }
5220         local_bh_enable();
5221 }
5222
5223 static void flush_all_backlogs(void)
5224 {
5225         unsigned int cpu;
5226
5227         get_online_cpus();
5228
5229         for_each_online_cpu(cpu)
5230                 queue_work_on(cpu, system_highpri_wq,
5231                               per_cpu_ptr(&flush_works, cpu));
5232
5233         for_each_online_cpu(cpu)
5234                 flush_work(per_cpu_ptr(&flush_works, cpu));
5235
5236         put_online_cpus();
5237 }
5238
5239 static int napi_gro_complete(struct sk_buff *skb)
5240 {
5241         struct packet_offload *ptype;
5242         __be16 type = skb->protocol;
5243         struct list_head *head = &offload_base;
5244         int err = -ENOENT;
5245
5246         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5247
5248         if (NAPI_GRO_CB(skb)->count == 1) {
5249                 skb_shinfo(skb)->gso_size = 0;
5250                 goto out;
5251         }
5252
5253         rcu_read_lock();
5254         list_for_each_entry_rcu(ptype, head, list) {
5255                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5256                         continue;
5257
5258                 err = ptype->callbacks.gro_complete(skb, 0);
5259                 break;
5260         }
5261         rcu_read_unlock();
5262
5263         if (err) {
5264                 WARN_ON(&ptype->list == head);
5265                 kfree_skb(skb);
5266                 return NET_RX_SUCCESS;
5267         }
5268
5269 out:
5270         return netif_receive_skb_internal(skb);
5271 }
5272
5273 static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
5274                                    bool flush_old)
5275 {
5276         struct list_head *head = &napi->gro_hash[index].list;
5277         struct sk_buff *skb, *p;
5278
5279         list_for_each_entry_safe_reverse(skb, p, head, list) {
5280                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5281                         return;
5282                 list_del(&skb->list);
5283                 skb->next = NULL;
5284                 napi_gro_complete(skb);
5285                 napi->gro_hash[index].count--;
5286         }
5287
5288         if (!napi->gro_hash[index].count)
5289                 __clear_bit(index, &napi->gro_bitmask);
5290 }
5291
5292 /* napi->gro_hash[].list contains packets ordered by age.
5293  * youngest packets at the head of it.
5294  * Complete skbs in reverse order to reduce latencies.
5295  */
5296 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5297 {
5298         u32 i;
5299
5300         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
5301                 if (test_bit(i, &napi->gro_bitmask))
5302                         __napi_gro_flush_chain(napi, i, flush_old);
5303         }
5304 }
5305 EXPORT_SYMBOL(napi_gro_flush);
5306
5307 static struct list_head *gro_list_prepare(struct napi_struct *napi,
5308                                           struct sk_buff *skb)
5309 {
5310         unsigned int maclen = skb->dev->hard_header_len;
5311         u32 hash = skb_get_hash_raw(skb);
5312         struct list_head *head;
5313         struct sk_buff *p;
5314
5315         head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
5316         list_for_each_entry(p, head, list) {
5317                 unsigned long diffs;
5318
5319                 NAPI_GRO_CB(p)->flush = 0;
5320
5321                 if (hash != skb_get_hash_raw(p)) {
5322                         NAPI_GRO_CB(p)->same_flow = 0;
5323                         continue;
5324                 }
5325
5326                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
5327                 diffs |= p->vlan_tci ^ skb->vlan_tci;
5328                 diffs |= skb_metadata_dst_cmp(p, skb);
5329                 diffs |= skb_metadata_differs(p, skb);
5330                 if (maclen == ETH_HLEN)
5331                         diffs |= compare_ether_header(skb_mac_header(p),
5332                                                       skb_mac_header(skb));
5333                 else if (!diffs)
5334                         diffs = memcmp(skb_mac_header(p),
5335                                        skb_mac_header(skb),
5336                                        maclen);
5337                 NAPI_GRO_CB(p)->same_flow = !diffs;
5338         }
5339
5340         return head;
5341 }
5342
5343 static void skb_gro_reset_offset(struct sk_buff *skb)
5344 {
5345         const struct skb_shared_info *pinfo = skb_shinfo(skb);
5346         const skb_frag_t *frag0 = &pinfo->frags[0];
5347
5348         NAPI_GRO_CB(skb)->data_offset = 0;
5349         NAPI_GRO_CB(skb)->frag0 = NULL;
5350         NAPI_GRO_CB(skb)->frag0_len = 0;
5351
5352         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
5353             pinfo->nr_frags &&
5354             !PageHighMem(skb_frag_page(frag0))) {
5355                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
5356                 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5357                                                     skb_frag_size(frag0),
5358                                                     skb->end - skb->tail);
5359         }
5360 }
5361
5362 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5363 {
5364         struct skb_shared_info *pinfo = skb_shinfo(skb);
5365
5366         BUG_ON(skb->end - skb->tail < grow);
5367
5368         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5369
5370         skb->data_len -= grow;
5371         skb->tail += grow;
5372
5373         pinfo->frags[0].page_offset += grow;
5374         skb_frag_size_sub(&pinfo->frags[0], grow);
5375
5376         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5377                 skb_frag_unref(skb, 0);
5378                 memmove(pinfo->frags, pinfo->frags + 1,
5379                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5380         }
5381 }
5382
5383 static void gro_flush_oldest(struct list_head *head)
5384 {
5385         struct sk_buff *oldest;
5386
5387         oldest = list_last_entry(head, struct sk_buff, list);
5388
5389         /* We are called with head length >= MAX_GRO_SKBS, so this is
5390          * impossible.
5391          */
5392         if (WARN_ON_ONCE(!oldest))
5393                 return;
5394
5395         /* Do not adjust napi->gro_hash[].count, caller is adding a new
5396          * SKB to the chain.
5397          */
5398         list_del(&oldest->list);
5399         napi_gro_complete(oldest);
5400 }
5401
5402 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5403 {
5404         u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
5405         struct list_head *head = &offload_base;
5406         struct packet_offload *ptype;
5407         __be16 type = skb->protocol;
5408         struct list_head *gro_head;
5409         struct sk_buff *pp = NULL;
5410         enum gro_result ret;
5411         int same_flow;
5412         int grow;
5413
5414         if (netif_elide_gro(skb->dev))
5415                 goto normal;
5416
5417         gro_head = gro_list_prepare(napi, skb);
5418
5419         rcu_read_lock();
5420         list_for_each_entry_rcu(ptype, head, list) {
5421                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5422                         continue;
5423
5424                 skb_set_network_header(skb, skb_gro_offset(skb));
5425                 skb_reset_mac_len(skb);
5426                 NAPI_GRO_CB(skb)->same_flow = 0;
5427                 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5428                 NAPI_GRO_CB(skb)->free = 0;
5429                 NAPI_GRO_CB(skb)->encap_mark = 0;
5430                 NAPI_GRO_CB(skb)->recursion_counter = 0;
5431                 NAPI_GRO_CB(skb)->is_fou = 0;
5432                 NAPI_GRO_CB(skb)->is_atomic = 1;
5433                 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
5434
5435                 /* Setup for GRO checksum validation */
5436                 switch (skb->ip_summed) {
5437                 case CHECKSUM_COMPLETE:
5438                         NAPI_GRO_CB(skb)->csum = skb->csum;
5439                         NAPI_GRO_CB(skb)->csum_valid = 1;
5440                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5441                         break;
5442                 case CHECKSUM_UNNECESSARY:
5443                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5444                         NAPI_GRO_CB(skb)->csum_valid = 0;
5445                         break;
5446                 default:
5447                         NAPI_GRO_CB(skb)->csum_cnt = 0;
5448                         NAPI_GRO_CB(skb)->csum_valid = 0;
5449                 }
5450
5451                 pp = ptype->callbacks.gro_receive(gro_head, skb);
5452                 break;
5453         }
5454         rcu_read_unlock();
5455
5456         if (&ptype->list == head)
5457                 goto normal;
5458
5459         if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
5460                 ret = GRO_CONSUMED;
5461                 goto ok;
5462         }
5463
5464         same_flow = NAPI_GRO_CB(skb)->same_flow;
5465         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
5466
5467         if (pp) {
5468                 list_del(&pp->list);
5469                 pp->next = NULL;
5470                 napi_gro_complete(pp);
5471                 napi->gro_hash[hash].count--;
5472         }
5473
5474         if (same_flow)
5475                 goto ok;
5476
5477         if (NAPI_GRO_CB(skb)->flush)
5478                 goto normal;
5479
5480         if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
5481                 gro_flush_oldest(gro_head);
5482         } else {
5483                 napi->gro_hash[hash].count++;
5484         }
5485         NAPI_GRO_CB(skb)->count = 1;
5486         NAPI_GRO_CB(skb)->age = jiffies;
5487         NAPI_GRO_CB(skb)->last = skb;
5488         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
5489         list_add(&skb->list, gro_head);
5490         ret = GRO_HELD;
5491
5492 pull:
5493         grow = skb_gro_offset(skb) - skb_headlen(skb);
5494         if (grow > 0)
5495                 gro_pull_from_frag0(skb, grow);
5496 ok:
5497         if (napi->gro_hash[hash].count) {
5498                 if (!test_bit(hash, &napi->gro_bitmask))
5499                         __set_bit(hash, &napi->gro_bitmask);
5500         } else if (test_bit(hash, &napi->gro_bitmask)) {
5501                 __clear_bit(hash, &napi->gro_bitmask);
5502         }
5503
5504         return ret;
5505
5506 normal:
5507         ret = GRO_NORMAL;
5508         goto pull;
5509 }
5510
5511 struct packet_offload *gro_find_receive_by_type(__be16 type)
5512 {
5513         struct list_head *offload_head = &offload_base;
5514         struct packet_offload *ptype;
5515
5516         list_for_each_entry_rcu(ptype, offload_head, list) {
5517                 if (ptype->type != type || !ptype->callbacks.gro_receive)
5518                         continue;
5519                 return ptype;
5520         }
5521         return NULL;
5522 }
5523 EXPORT_SYMBOL(gro_find_receive_by_type);
5524
5525 struct packet_offload *gro_find_complete_by_type(__be16 type)
5526 {
5527         struct list_head *offload_head = &offload_base;
5528         struct packet_offload *ptype;
5529
5530         list_for_each_entry_rcu(ptype, offload_head, list) {
5531                 if (ptype->type != type || !ptype->callbacks.gro_complete)
5532                         continue;
5533                 return ptype;
5534         }
5535         return NULL;
5536 }
5537 EXPORT_SYMBOL(gro_find_complete_by_type);
5538
5539 static void napi_skb_free_stolen_head(struct sk_buff *skb)
5540 {
5541         skb_dst_drop(skb);
5542         secpath_reset(skb);
5543         kmem_cache_free(skbuff_head_cache, skb);
5544 }
5545
5546 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5547 {
5548         switch (ret) {
5549         case GRO_NORMAL:
5550                 if (netif_receive_skb_internal(skb))
5551                         ret = GRO_DROP;
5552                 break;
5553
5554         case GRO_DROP:
5555                 kfree_skb(skb);
5556                 break;
5557
5558         case GRO_MERGED_FREE:
5559                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5560                         napi_skb_free_stolen_head(skb);
5561                 else
5562                         __kfree_skb(skb);
5563                 break;
5564
5565         case GRO_HELD:
5566         case GRO_MERGED:
5567         case GRO_CONSUMED:
5568                 break;
5569         }
5570
5571         return ret;
5572 }
5573
5574 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5575 {
5576         skb_mark_napi_id(skb, napi);
5577         trace_napi_gro_receive_entry(skb);
5578
5579         skb_gro_reset_offset(skb);
5580
5581         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
5582 }
5583 EXPORT_SYMBOL(napi_gro_receive);
5584
5585 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
5586 {
5587         if (unlikely(skb->pfmemalloc)) {
5588                 consume_skb(skb);
5589                 return;
5590         }
5591         __skb_pull(skb, skb_headlen(skb));
5592         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
5593         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
5594         skb->vlan_tci = 0;
5595         skb->dev = napi->dev;
5596         skb->skb_iif = 0;
5597         skb->encapsulation = 0;
5598         skb_shinfo(skb)->gso_type = 0;
5599         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5600         secpath_reset(skb);
5601
5602         napi->skb = skb;
5603 }
5604
5605 struct sk_buff *napi_get_frags(struct napi_struct *napi)
5606 {
5607         struct sk_buff *skb = napi->skb;
5608
5609         if (!skb) {
5610                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
5611                 if (skb) {
5612                         napi->skb = skb;
5613                         skb_mark_napi_id(skb, napi);
5614                 }
5615         }
5616         return skb;
5617 }
5618 EXPORT_SYMBOL(napi_get_frags);
5619
5620 static gro_result_t napi_frags_finish(struct napi_struct *napi,
5621                                       struct sk_buff *skb,
5622                                       gro_result_t ret)
5623 {
5624         switch (ret) {
5625         case GRO_NORMAL:
5626         case GRO_HELD:
5627                 __skb_push(skb, ETH_HLEN);
5628                 skb->protocol = eth_type_trans(skb, skb->dev);
5629                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
5630                         ret = GRO_DROP;
5631                 break;
5632
5633         case GRO_DROP:
5634                 napi_reuse_skb(napi, skb);
5635                 break;
5636
5637         case GRO_MERGED_FREE:
5638                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
5639                         napi_skb_free_stolen_head(skb);
5640                 else
5641                         napi_reuse_skb(napi, skb);
5642                 break;
5643
5644         case GRO_MERGED:
5645         case GRO_CONSUMED:
5646                 break;
5647         }
5648
5649         return ret;
5650 }
5651
5652 /* Upper GRO stack assumes network header starts at gro_offset=0
5653  * Drivers could call both napi_gro_frags() and napi_gro_receive()
5654  * We copy ethernet header into skb->data to have a common layout.
5655  */
5656 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
5657 {
5658         struct sk_buff *skb = napi->skb;
5659         const struct ethhdr *eth;
5660         unsigned int hlen = sizeof(*eth);
5661
5662         napi->skb = NULL;
5663
5664         skb_reset_mac_header(skb);
5665         skb_gro_reset_offset(skb);
5666
5667         eth = skb_gro_header_fast(skb, 0);
5668         if (unlikely(skb_gro_header_hard(skb, hlen))) {
5669                 eth = skb_gro_header_slow(skb, hlen, 0);
5670                 if (unlikely(!eth)) {
5671                         net_warn_ratelimited("%s: dropping impossible skb from %s\n",
5672                                              __func__, napi->dev->name);
5673                         napi_reuse_skb(napi, skb);
5674                         return NULL;
5675                 }
5676         } else {
5677                 gro_pull_from_frag0(skb, hlen);
5678                 NAPI_GRO_CB(skb)->frag0 += hlen;
5679                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
5680         }
5681         __skb_pull(skb, hlen);
5682
5683         /*
5684          * This works because the only protocols we care about don't require
5685          * special handling.
5686          * We'll fix it up properly in napi_frags_finish()
5687          */
5688         skb->protocol = eth->h_proto;
5689
5690         return skb;
5691 }
5692
5693 gro_result_t napi_gro_frags(struct napi_struct *napi)
5694 {
5695         struct sk_buff *skb = napi_frags_skb(napi);
5696
5697         if (!skb)
5698                 return GRO_DROP;
5699
5700         trace_napi_gro_frags_entry(skb);
5701
5702         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5703 }
5704 EXPORT_SYMBOL(napi_gro_frags);
5705
5706 /* Compute the checksum from gro_offset and return the folded value
5707  * after adding in any pseudo checksum.
5708  */
5709 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
5710 {
5711         __wsum wsum;
5712         __sum16 sum;
5713
5714         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
5715
5716         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
5717         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
5718         if (likely(!sum)) {
5719                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
5720                     !skb->csum_complete_sw)
5721                         netdev_rx_csum_fault(skb->dev);
5722         }
5723
5724         NAPI_GRO_CB(skb)->csum = wsum;
5725         NAPI_GRO_CB(skb)->csum_valid = 1;
5726
5727         return sum;
5728 }
5729 EXPORT_SYMBOL(__skb_gro_checksum_complete);
5730
5731 static void net_rps_send_ipi(struct softnet_data *remsd)
5732 {
5733 #ifdef CONFIG_RPS
5734         while (remsd) {
5735                 struct softnet_data *next = remsd->rps_ipi_next;
5736
5737                 if (cpu_online(remsd->cpu))
5738                         smp_call_function_single_async(remsd->cpu, &remsd->csd);
5739                 remsd = next;
5740         }
5741 #endif
5742 }
5743
5744 /*
5745  * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5746  * Note: called with local irq disabled, but exits with local irq enabled.
5747  */
5748 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5749 {
5750 #ifdef CONFIG_RPS
5751         struct softnet_data *remsd = sd->rps_ipi_list;
5752
5753         if (remsd) {
5754                 sd->rps_ipi_list = NULL;
5755
5756                 local_irq_enable();
5757
5758                 /* Send pending IPI's to kick RPS processing on remote cpus. */
5759                 net_rps_send_ipi(remsd);
5760         } else
5761 #endif
5762                 local_irq_enable();
5763 }
5764
5765 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5766 {
5767 #ifdef CONFIG_RPS
5768         return sd->rps_ipi_list != NULL;
5769 #else
5770         return false;
5771 #endif
5772 }
5773
5774 static int process_backlog(struct napi_struct *napi, int quota)
5775 {
5776         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5777         bool again = true;
5778         int work = 0;
5779
5780         /* Check if we have pending ipi, its better to send them now,
5781          * not waiting net_rx_action() end.
5782          */
5783         if (sd_has_rps_ipi_waiting(sd)) {
5784                 local_irq_disable();
5785                 net_rps_action_and_irq_enable(sd);
5786         }
5787
5788         napi->weight = dev_rx_weight;
5789         while (again) {
5790                 struct sk_buff *skb;
5791
5792                 while ((skb = __skb_dequeue(&sd->process_queue))) {
5793                         rcu_read_lock();
5794                         __netif_receive_skb(skb);
5795                         rcu_read_unlock();
5796                         input_queue_head_incr(sd);
5797                         if (++work >= quota)
5798                                 return work;
5799
5800                 }
5801
5802                 local_irq_disable();
5803                 rps_lock(sd);
5804                 if (skb_queue_empty(&sd->input_pkt_queue)) {
5805                         /*
5806                          * Inline a custom version of __napi_complete().
5807                          * only current cpu owns and manipulates this napi,
5808                          * and NAPI_STATE_SCHED is the only possible flag set
5809                          * on backlog.
5810                          * We can use a plain write instead of clear_bit(),
5811                          * and we dont need an smp_mb() memory barrier.
5812                          */
5813                         napi->state = 0;
5814                         again = false;
5815                 } else {
5816                         skb_queue_splice_tail_init(&sd->input_pkt_queue,
5817                                                    &sd->process_queue);
5818                 }
5819                 rps_unlock(sd);
5820                 local_irq_enable();
5821         }
5822
5823         return work;
5824 }
5825
5826 /**
5827  * __napi_schedule - schedule for receive
5828  * @n: entry to schedule
5829  *
5830  * The entry's receive function will be scheduled to run.
5831  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
5832  */
5833 void __napi_schedule(struct napi_struct *n)
5834 {
5835         unsigned long flags;
5836
5837         local_irq_save(flags);
5838         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5839         local_irq_restore(flags);
5840 }
5841 EXPORT_SYMBOL(__napi_schedule);
5842
5843 /**
5844  *      napi_schedule_prep - check if napi can be scheduled
5845  *      @n: napi context
5846  *
5847  * Test if NAPI routine is already running, and if not mark
5848  * it as running.  This is used as a condition variable
5849  * insure only one NAPI poll instance runs.  We also make
5850  * sure there is no pending NAPI disable.
5851  */
5852 bool napi_schedule_prep(struct napi_struct *n)
5853 {
5854         unsigned long val, new;
5855
5856         do {
5857                 val = READ_ONCE(n->state);
5858                 if (unlikely(val & NAPIF_STATE_DISABLE))
5859                         return false;
5860                 new = val | NAPIF_STATE_SCHED;
5861
5862                 /* Sets STATE_MISSED bit if STATE_SCHED was already set
5863                  * This was suggested by Alexander Duyck, as compiler
5864                  * emits better code than :
5865                  * if (val & NAPIF_STATE_SCHED)
5866                  *     new |= NAPIF_STATE_MISSED;
5867                  */
5868                 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
5869                                                    NAPIF_STATE_MISSED;
5870         } while (cmpxchg(&n->state, val, new) != val);
5871
5872         return !(val & NAPIF_STATE_SCHED);
5873 }
5874 EXPORT_SYMBOL(napi_schedule_prep);
5875
5876 /**
5877  * __napi_schedule_irqoff - schedule for receive
5878  * @n: entry to schedule
5879  *
5880  * Variant of __napi_schedule() assuming hard irqs are masked
5881  */
5882 void __napi_schedule_irqoff(struct napi_struct *n)
5883 {
5884         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
5885 }
5886 EXPORT_SYMBOL(__napi_schedule_irqoff);
5887
5888 bool napi_complete_done(struct napi_struct *n, int work_done)
5889 {
5890         unsigned long flags, val, new;
5891
5892         /*
5893          * 1) Don't let napi dequeue from the cpu poll list
5894          *    just in case its running on a different cpu.
5895          * 2) If we are busy polling, do nothing here, we have
5896          *    the guarantee we will be called later.
5897          */
5898         if (unlikely(n->state & (NAPIF_STATE_NPSVC |
5899                                  NAPIF_STATE_IN_BUSY_POLL)))
5900                 return false;
5901
5902         if (n->gro_bitmask) {
5903                 unsigned long timeout = 0;
5904
5905                 if (work_done)
5906                         timeout = n->dev->gro_flush_timeout;
5907
5908                 if (timeout)
5909                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
5910                                       HRTIMER_MODE_REL_PINNED);
5911                 else
5912                         napi_gro_flush(n, false);
5913         }
5914         if (unlikely(!list_empty(&n->poll_list))) {
5915                 /* If n->poll_list is not empty, we need to mask irqs */
5916                 local_irq_save(flags);
5917                 list_del_init(&n->poll_list);
5918                 local_irq_restore(flags);
5919         }
5920
5921         do {
5922                 val = READ_ONCE(n->state);
5923
5924                 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
5925
5926                 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED);
5927
5928                 /* If STATE_MISSED was set, leave STATE_SCHED set,
5929                  * because we will call napi->poll() one more time.
5930                  * This C code was suggested by Alexander Duyck to help gcc.
5931                  */
5932                 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
5933                                                     NAPIF_STATE_SCHED;
5934         } while (cmpxchg(&n->state, val, new) != val);
5935
5936         if (unlikely(val & NAPIF_STATE_MISSED)) {
5937                 __napi_schedule(n);
5938                 return false;
5939         }
5940
5941         return true;
5942 }
5943 EXPORT_SYMBOL(napi_complete_done);
5944
5945 /* must be called under rcu_read_lock(), as we dont take a reference */
5946 static struct napi_struct *napi_by_id(unsigned int napi_id)
5947 {
5948         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
5949         struct napi_struct *napi;
5950
5951         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
5952                 if (napi->napi_id == napi_id)
5953                         return napi;
5954
5955         return NULL;
5956 }
5957
5958 #if defined(CONFIG_NET_RX_BUSY_POLL)
5959
5960 #define BUSY_POLL_BUDGET 8
5961
5962 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
5963 {
5964         int rc;
5965
5966         /* Busy polling means there is a high chance device driver hard irq
5967          * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
5968          * set in napi_schedule_prep().
5969          * Since we are about to call napi->poll() once more, we can safely
5970          * clear NAPI_STATE_MISSED.
5971          *
5972          * Note: x86 could use a single "lock and ..." instruction
5973          * to perform these two clear_bit()
5974          */
5975         clear_bit(NAPI_STATE_MISSED, &napi->state);
5976         clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
5977
5978         local_bh_disable();
5979
5980         /* All we really want here is to re-enable device interrupts.
5981          * Ideally, a new ndo_busy_poll_stop() could avoid another round.
5982          */
5983         rc = napi->poll(napi, BUSY_POLL_BUDGET);
5984         trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
5985         netpoll_poll_unlock(have_poll_lock);
5986         if (rc == BUSY_POLL_BUDGET)
5987                 __napi_schedule(napi);
5988         local_bh_enable();
5989 }
5990
5991 void napi_busy_loop(unsigned int napi_id,
5992                     bool (*loop_end)(void *, unsigned long),
5993                     void *loop_end_arg)
5994 {
5995         unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
5996         int (*napi_poll)(struct napi_struct *napi, int budget);
5997         void *have_poll_lock = NULL;
5998         struct napi_struct *napi;
5999
6000 restart:
6001         napi_poll = NULL;
6002
6003         rcu_read_lock();
6004
6005         napi = napi_by_id(napi_id);
6006         if (!napi)
6007                 goto out;
6008
6009         preempt_disable();
6010         for (;;) {
6011                 int work = 0;
6012
6013                 local_bh_disable();
6014                 if (!napi_poll) {
6015                         unsigned long val = READ_ONCE(napi->state);
6016
6017                         /* If multiple threads are competing for this napi,
6018                          * we avoid dirtying napi->state as much as we can.
6019                          */
6020                         if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6021                                    NAPIF_STATE_IN_BUSY_POLL))
6022                                 goto count;
6023                         if (cmpxchg(&napi->state, val,
6024                                     val | NAPIF_STATE_IN_BUSY_POLL |
6025                                           NAPIF_STATE_SCHED) != val)
6026                                 goto count;
6027                         have_poll_lock = netpoll_poll_lock(napi);
6028                         napi_poll = napi->poll;
6029                 }
6030                 work = napi_poll(napi, BUSY_POLL_BUDGET);
6031                 trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
6032 count:
6033                 if (work > 0)
6034                         __NET_ADD_STATS(dev_net(napi->dev),
6035                                         LINUX_MIB_BUSYPOLLRXPACKETS, work);
6036                 local_bh_enable();
6037
6038                 if (!loop_end || loop_end(loop_end_arg, start_time))
6039                         break;
6040
6041                 if (unlikely(need_resched())) {
6042                         if (napi_poll)
6043                                 busy_poll_stop(napi, have_poll_lock);
6044                         preempt_enable();
6045                         rcu_read_unlock();
6046                         cond_resched();
6047                         if (loop_end(loop_end_arg, start_time))
6048                                 return;
6049                         goto restart;
6050                 }
6051                 cpu_relax();
6052         }
6053         if (napi_poll)
6054                 busy_poll_stop(napi, have_poll_lock);
6055         preempt_enable();
6056 out:
6057         rcu_read_unlock();
6058 }
6059 EXPORT_SYMBOL(napi_busy_loop);
6060
6061 #endif /* CONFIG_NET_RX_BUSY_POLL */
6062
6063 static void napi_hash_add(struct napi_struct *napi)
6064 {
6065         if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
6066             test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
6067                 return;
6068
6069         spin_lock(&napi_hash_lock);
6070
6071         /* 0..NR_CPUS range is reserved for sender_cpu use */
6072         do {
6073                 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6074                         napi_gen_id = MIN_NAPI_ID;
6075         } while (napi_by_id(napi_gen_id));
6076         napi->napi_id = napi_gen_id;
6077
6078         hlist_add_head_rcu(&napi->napi_hash_node,
6079                            &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6080
6081         spin_unlock(&napi_hash_lock);
6082 }
6083
6084 /* Warning : caller is responsible to make sure rcu grace period
6085  * is respected before freeing memory containing @napi
6086  */
6087 bool napi_hash_del(struct napi_struct *napi)
6088 {
6089         bool rcu_sync_needed = false;
6090
6091         spin_lock(&napi_hash_lock);
6092
6093         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
6094                 rcu_sync_needed = true;
6095                 hlist_del_rcu(&napi->napi_hash_node);
6096         }
6097         spin_unlock(&napi_hash_lock);
6098         return rcu_sync_needed;
6099 }
6100 EXPORT_SYMBOL_GPL(napi_hash_del);
6101
6102 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6103 {
6104         struct napi_struct *napi;
6105
6106         napi = container_of(timer, struct napi_struct, timer);
6107
6108         /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6109          * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6110          */
6111         if (napi->gro_bitmask && !napi_disable_pending(napi) &&
6112             !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
6113                 __napi_schedule_irqoff(napi);
6114
6115         return HRTIMER_NORESTART;
6116 }
6117
6118 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6119                     int (*poll)(struct napi_struct *, int), int weight)
6120 {
6121         int i;
6122
6123         INIT_LIST_HEAD(&napi->poll_list);
6124         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6125         napi->timer.function = napi_watchdog;
6126         napi->gro_bitmask = 0;
6127         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6128                 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6129                 napi->gro_hash[i].count = 0;
6130         }
6131         napi->skb = NULL;
6132         napi->poll = poll;
6133         if (weight > NAPI_POLL_WEIGHT)
6134                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
6135                             weight, dev->name);
6136         napi->weight = weight;
6137         list_add(&napi->dev_list, &dev->napi_list);
6138         napi->dev = dev;
6139 #ifdef CONFIG_NETPOLL
6140         napi->poll_owner = -1;
6141 #endif
6142         set_bit(NAPI_STATE_SCHED, &napi->state);
6143         napi_hash_add(napi);
6144 }
6145 EXPORT_SYMBOL(netif_napi_add);
6146
6147 void napi_disable(struct napi_struct *n)
6148 {
6149         might_sleep();
6150         set_bit(NAPI_STATE_DISABLE, &n->state);
6151
6152         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6153                 msleep(1);
6154         while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6155                 msleep(1);
6156
6157         hrtimer_cancel(&n->timer);
6158
6159         clear_bit(NAPI_STATE_DISABLE, &n->state);
6160 }
6161 EXPORT_SYMBOL(napi_disable);
6162
6163 static void flush_gro_hash(struct napi_struct *napi)
6164 {
6165         int i;
6166
6167         for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6168                 struct sk_buff *skb, *n;
6169
6170                 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6171                         kfree_skb(skb);
6172                 napi->gro_hash[i].count = 0;
6173         }
6174 }
6175
6176 /* Must be called in process context */
6177 void netif_napi_del(struct napi_struct *napi)
6178 {
6179         might_sleep();
6180         if (napi_hash_del(napi))
6181                 synchronize_net();
6182         list_del_init(&napi->dev_list);
6183         napi_free_frags(napi);
6184
6185         flush_gro_hash(napi);
6186         napi->gro_bitmask = 0;
6187 }
6188 EXPORT_SYMBOL(netif_napi_del);
6189
6190 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6191 {
6192         void *have;
6193         int work, weight;
6194
6195         list_del_init(&n->poll_list);
6196
6197         have = netpoll_poll_lock(n);
6198
6199         weight = n->weight;
6200
6201         /* This NAPI_STATE_SCHED test is for avoiding a race
6202          * with netpoll's poll_napi().  Only the entity which
6203          * obtains the lock and sees NAPI_STATE_SCHED set will
6204          * actually make the ->poll() call.  Therefore we avoid
6205          * accidentally calling ->poll() when NAPI is not scheduled.
6206          */
6207         work = 0;
6208         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6209                 work = n->poll(n, weight);
6210                 trace_napi_poll(n, work, weight);
6211         }
6212
6213         WARN_ON_ONCE(work > weight);
6214
6215         if (likely(work < weight))
6216                 goto out_unlock;
6217
6218         /* Drivers must not modify the NAPI state if they
6219          * consume the entire weight.  In such cases this code
6220          * still "owns" the NAPI instance and therefore can
6221          * move the instance around on the list at-will.
6222          */
6223         if (unlikely(napi_disable_pending(n))) {
6224                 napi_complete(n);
6225                 goto out_unlock;
6226         }
6227
6228         if (n->gro_bitmask) {
6229                 /* flush too old packets
6230                  * If HZ < 1000, flush all packets.
6231                  */
6232                 napi_gro_flush(n, HZ >= 1000);
6233         }
6234
6235         /* Some drivers may have called napi_schedule
6236          * prior to exhausting their budget.
6237          */
6238         if (unlikely(!list_empty(&n->poll_list))) {
6239                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6240                              n->dev ? n->dev->name : "backlog");
6241                 goto out_unlock;
6242         }
6243
6244         list_add_tail(&n->poll_list, repoll);
6245
6246 out_unlock:
6247         netpoll_poll_unlock(have);
6248
6249         return work;
6250 }
6251
6252 static __latent_entropy void net_rx_action(struct softirq_action *h)
6253 {
6254         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6255         unsigned long time_limit = jiffies +
6256                 usecs_to_jiffies(netdev_budget_usecs);
6257         int budget = netdev_budget;
6258         LIST_HEAD(list);
6259         LIST_HEAD(repoll);
6260
6261         local_irq_disable();
6262         list_splice_init(&sd->poll_list, &list);
6263         local_irq_enable();
6264
6265         for (;;) {
6266                 struct napi_struct *n;
6267
6268                 if (list_empty(&list)) {
6269                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
6270                                 goto out;
6271                         break;
6272                 }
6273
6274                 n = list_first_entry(&list, struct napi_struct, poll_list);
6275                 budget -= napi_poll(n, &repoll);
6276
6277                 /* If softirq window is exhausted then punt.
6278                  * Allow this to run for 2 jiffies since which will allow
6279                  * an average latency of 1.5/HZ.
6280                  */
6281                 if (unlikely(budget <= 0 ||
6282                              time_after_eq(jiffies, time_limit))) {
6283                         sd->time_squeeze++;
6284                         break;
6285                 }
6286         }
6287
6288         local_irq_disable();
6289
6290         list_splice_tail_init(&sd->poll_list, &list);
6291         list_splice_tail(&repoll, &list);
6292         list_splice(&list, &sd->poll_list);
6293         if (!list_empty(&sd->poll_list))
6294                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6295
6296         net_rps_action_and_irq_enable(sd);
6297 out:
6298         __kfree_skb_flush();
6299 }
6300
6301 struct netdev_adjacent {
6302         struct net_device *dev;
6303
6304         /* upper master flag, there can only be one master device per list */
6305         bool master;
6306
6307         /* counter for the number of times this device was added to us */
6308         u16 ref_nr;
6309
6310         /* private field for the users */
6311         void *private;
6312
6313         struct list_head list;
6314         struct rcu_head rcu;
6315 };
6316
6317 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6318                                                  struct list_head *adj_list)
6319 {
6320         struct netdev_adjacent *adj;
6321
6322         list_for_each_entry(adj, adj_list, list) {
6323                 if (adj->dev == adj_dev)
6324                         return adj;
6325         }
6326         return NULL;
6327 }
6328
6329 static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
6330 {
6331         struct net_device *dev = data;
6332
6333         return upper_dev == dev;
6334 }
6335
6336 /**
6337  * netdev_has_upper_dev - Check if device is linked to an upper device
6338  * @dev: device
6339  * @upper_dev: upper device to check
6340  *
6341  * Find out if a device is linked to specified upper device and return true
6342  * in case it is. Note that this checks only immediate upper device,
6343  * not through a complete stack of devices. The caller must hold the RTNL lock.
6344  */
6345 bool netdev_has_upper_dev(struct net_device *dev,
6346                           struct net_device *upper_dev)
6347 {
6348         ASSERT_RTNL();
6349
6350         return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6351                                              upper_dev);
6352 }
6353 EXPORT_SYMBOL(netdev_has_upper_dev);
6354
6355 /**
6356  * netdev_has_upper_dev_all - Check if device is linked to an upper device
6357  * @dev: device
6358  * @upper_dev: upper device to check
6359  *
6360  * Find out if a device is linked to specified upper device and return true
6361  * in case it is. Note that this checks the entire upper device chain.
6362  * The caller must hold rcu lock.
6363  */
6364
6365 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6366                                   struct net_device *upper_dev)
6367 {
6368         return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
6369                                                upper_dev);
6370 }
6371 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6372
6373 /**
6374  * netdev_has_any_upper_dev - Check if device is linked to some device
6375  * @dev: device
6376  *
6377  * Find out if a device is linked to an upper device and return true in case
6378  * it is. The caller must hold the RTNL lock.
6379  */
6380 bool netdev_has_any_upper_dev(struct net_device *dev)
6381 {
6382         ASSERT_RTNL();
6383
6384         return !list_empty(&dev->adj_list.upper);
6385 }
6386 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6387
6388 /**
6389  * netdev_master_upper_dev_get - Get master upper device
6390  * @dev: device
6391  *
6392  * Find a master upper device and return pointer to it or NULL in case
6393  * it's not there. The caller must hold the RTNL lock.
6394  */
6395 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6396 {
6397         struct netdev_adjacent *upper;
6398
6399         ASSERT_RTNL();
6400
6401         if (list_empty(&dev->adj_list.upper))
6402                 return NULL;
6403
6404         upper = list_first_entry(&dev->adj_list.upper,
6405                                  struct netdev_adjacent, list);
6406         if (likely(upper->master))
6407                 return upper->dev;
6408         return NULL;
6409 }
6410 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6411
6412 /**
6413  * netdev_has_any_lower_dev - Check if device is linked to some device
6414  * @dev: device
6415  *
6416  * Find out if a device is linked to a lower device and return true in case
6417  * it is. The caller must hold the RTNL lock.
6418  */
6419 static bool netdev_has_any_lower_dev(struct net_device *dev)
6420 {
6421         ASSERT_RTNL();
6422
6423         return !list_empty(&dev->adj_list.lower);
6424 }
6425
6426 void *netdev_adjacent_get_private(struct list_head *adj_list)
6427 {
6428         struct netdev_adjacent *adj;
6429
6430         adj = list_entry(adj_list, struct netdev_adjacent, list);
6431
6432         return adj->private;
6433 }
6434 EXPORT_SYMBOL(netdev_adjacent_get_private);
6435
6436 /**
6437  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
6438  * @dev: device
6439  * @iter: list_head ** of the current position
6440  *
6441  * Gets the next device from the dev's upper list, starting from iter
6442  * position. The caller must hold RCU read lock.
6443  */
6444 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
6445                                                  struct list_head **iter)
6446 {
6447         struct netdev_adjacent *upper;
6448
6449         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6450
6451         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6452
6453         if (&upper->list == &dev->adj_list.upper)
6454                 return NULL;
6455
6456         *iter = &upper->list;
6457
6458         return upper->dev;
6459 }
6460 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
6461
6462 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
6463                                                     struct list_head **iter)
6464 {
6465         struct netdev_adjacent *upper;
6466
6467         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
6468
6469         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6470
6471         if (&upper->list == &dev->adj_list.upper)
6472                 return NULL;
6473
6474         *iter = &upper->list;
6475
6476         return upper->dev;
6477 }
6478
6479 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
6480                                   int (*fn)(struct net_device *dev,
6481                                             void *data),
6482                                   void *data)
6483 {
6484         struct net_device *udev;
6485         struct list_head *iter;
6486         int ret;
6487
6488         for (iter = &dev->adj_list.upper,
6489              udev = netdev_next_upper_dev_rcu(dev, &iter);
6490              udev;
6491              udev = netdev_next_upper_dev_rcu(dev, &iter)) {
6492                 /* first is the upper device itself */
6493                 ret = fn(udev, data);
6494                 if (ret)
6495                         return ret;
6496
6497                 /* then look at all of its upper devices */
6498                 ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
6499                 if (ret)
6500                         return ret;
6501         }
6502
6503         return 0;
6504 }
6505 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
6506
6507 /**
6508  * netdev_lower_get_next_private - Get the next ->private from the
6509  *                                 lower neighbour list
6510  * @dev: device
6511  * @iter: list_head ** of the current position
6512  *
6513  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6514  * list, starting from iter position. The caller must hold either hold the
6515  * RTNL lock or its own locking that guarantees that the neighbour lower
6516  * list will remain unchanged.
6517  */
6518 void *netdev_lower_get_next_private(struct net_device *dev,
6519                                     struct list_head **iter)
6520 {
6521         struct netdev_adjacent *lower;
6522
6523         lower = list_entry(*iter, struct netdev_adjacent, list);
6524
6525         if (&lower->list == &dev->adj_list.lower)
6526                 return NULL;
6527
6528         *iter = lower->list.next;
6529
6530         return lower->private;
6531 }
6532 EXPORT_SYMBOL(netdev_lower_get_next_private);
6533
6534 /**
6535  * netdev_lower_get_next_private_rcu - Get the next ->private from the
6536  *                                     lower neighbour list, RCU
6537  *                                     variant
6538  * @dev: device
6539  * @iter: list_head ** of the current position
6540  *
6541  * Gets the next netdev_adjacent->private from the dev's lower neighbour
6542  * list, starting from iter position. The caller must hold RCU read lock.
6543  */
6544 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
6545                                         struct list_head **iter)
6546 {
6547         struct netdev_adjacent *lower;
6548
6549         WARN_ON_ONCE(!rcu_read_lock_held());
6550
6551         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6552
6553         if (&lower->list == &dev->adj_list.lower)
6554                 return NULL;
6555
6556         *iter = &lower->list;
6557
6558         return lower->private;
6559 }
6560 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
6561
6562 /**
6563  * netdev_lower_get_next - Get the next device from the lower neighbour
6564  *                         list
6565  * @dev: device
6566  * @iter: list_head ** of the current position
6567  *
6568  * Gets the next netdev_adjacent from the dev's lower neighbour
6569  * list, starting from iter position. The caller must hold RTNL lock or
6570  * its own locking that guarantees that the neighbour lower
6571  * list will remain unchanged.
6572  */
6573 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
6574 {
6575         struct netdev_adjacent *lower;
6576
6577         lower = list_entry(*iter, struct netdev_adjacent, list);
6578
6579         if (&lower->list == &dev->adj_list.lower)
6580                 return NULL;
6581
6582         *iter = lower->list.next;
6583
6584         return lower->dev;
6585 }
6586 EXPORT_SYMBOL(netdev_lower_get_next);
6587
6588 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
6589                                                 struct list_head **iter)
6590 {
6591         struct netdev_adjacent *lower;
6592
6593         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
6594
6595         if (&lower->list == &dev->adj_list.lower)
6596                 return NULL;
6597
6598         *iter = &lower->list;
6599
6600         return lower->dev;
6601 }
6602
6603 int netdev_walk_all_lower_dev(struct net_device *dev,
6604                               int (*fn)(struct net_device *dev,
6605                                         void *data),
6606                               void *data)
6607 {
6608         struct net_device *ldev;
6609         struct list_head *iter;
6610         int ret;
6611
6612         for (iter = &dev->adj_list.lower,
6613              ldev = netdev_next_lower_dev(dev, &iter);
6614              ldev;
6615              ldev = netdev_next_lower_dev(dev, &iter)) {
6616                 /* first is the lower device itself */
6617                 ret = fn(ldev, data);
6618                 if (ret)
6619                         return ret;
6620
6621                 /* then look at all of its lower devices */
6622                 ret = netdev_walk_all_lower_dev(ldev, fn, data);
6623                 if (ret)
6624                         return ret;
6625         }
6626
6627         return 0;
6628 }
6629 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
6630
6631 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6632                                                     struct list_head **iter)
6633 {
6634         struct netdev_adjacent *lower;
6635
6636         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
6637         if (&lower->list == &dev->adj_list.lower)
6638                 return NULL;
6639
6640         *iter = &lower->list;
6641
6642         return lower->dev;
6643 }
6644
6645 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
6646                                   int (*fn)(struct net_device *dev,
6647                                             void *data),
6648                                   void *data)
6649 {
6650         struct net_device *ldev;
6651         struct list_head *iter;
6652         int ret;
6653
6654         for (iter = &dev->adj_list.lower,
6655              ldev = netdev_next_lower_dev_rcu(dev, &iter);
6656              ldev;
6657              ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
6658                 /* first is the lower device itself */
6659                 ret = fn(ldev, data);
6660                 if (ret)
6661                         return ret;
6662
6663                 /* then look at all of its lower devices */
6664                 ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
6665                 if (ret)
6666                         return ret;
6667         }
6668
6669         return 0;
6670 }
6671 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
6672
6673 /**
6674  * netdev_lower_get_first_private_rcu - Get the first ->private from the
6675  *                                     lower neighbour list, RCU
6676  *                                     variant
6677  * @dev: device
6678  *
6679  * Gets the first netdev_adjacent->private from the dev's lower neighbour
6680  * list. The caller must hold RCU read lock.
6681  */
6682 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
6683 {
6684         struct netdev_adjacent *lower;
6685
6686         lower = list_first_or_null_rcu(&dev->adj_list.lower,
6687                         struct netdev_adjacent, list);
6688         if (lower)
6689                 return lower->private;
6690         return NULL;
6691 }
6692 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
6693
6694 /**
6695  * netdev_master_upper_dev_get_rcu - Get master upper device
6696  * @dev: device
6697  *
6698  * Find a master upper device and return pointer to it or NULL in case
6699  * it's not there. The caller must hold the RCU read lock.
6700  */
6701 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
6702 {
6703         struct netdev_adjacent *upper;
6704
6705         upper = list_first_or_null_rcu(&dev->adj_list.upper,
6706                                        struct netdev_adjacent, list);
6707         if (upper && likely(upper->master))
6708                 return upper->dev;
6709         return NULL;
6710 }
6711 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
6712
6713 static int netdev_adjacent_sysfs_add(struct net_device *dev,
6714                               struct net_device *adj_dev,
6715                               struct list_head *dev_list)
6716 {
6717         char linkname[IFNAMSIZ+7];
6718
6719         sprintf(linkname, dev_list == &dev->adj_list.upper ?
6720                 "upper_%s" : "lower_%s", adj_dev->name);
6721         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
6722                                  linkname);
6723 }
6724 static void netdev_adjacent_sysfs_del(struct net_device *dev,
6725                                char *name,
6726                                struct list_head *dev_list)
6727 {
6728         char linkname[IFNAMSIZ+7];
6729
6730         sprintf(linkname, dev_list == &dev->adj_list.upper ?
6731                 "upper_%s" : "lower_%s", name);
6732         sysfs_remove_link(&(dev->dev.kobj), linkname);
6733 }
6734
6735 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
6736                                                  struct net_device *adj_dev,
6737                                                  struct list_head *dev_list)
6738 {
6739         return (dev_list == &dev->adj_list.upper ||
6740                 dev_list == &dev->adj_list.lower) &&
6741                 net_eq(dev_net(dev), dev_net(adj_dev));
6742 }
6743
6744 static int __netdev_adjacent_dev_insert(struct net_device *dev,
6745                                         struct net_device *adj_dev,
6746                                         struct list_head *dev_list,
6747                                         void *private, bool master)
6748 {
6749         struct netdev_adjacent *adj;
6750         int ret;
6751
6752         adj = __netdev_find_adj(adj_dev, dev_list);
6753
6754         if (adj) {
6755                 adj->ref_nr += 1;
6756                 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
6757                          dev->name, adj_dev->name, adj->ref_nr);
6758
6759                 return 0;
6760         }
6761
6762         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
6763         if (!adj)
6764                 return -ENOMEM;
6765
6766         adj->dev = adj_dev;
6767         adj->master = master;
6768         adj->ref_nr = 1;
6769         adj->private = private;
6770         dev_hold(adj_dev);
6771
6772         pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
6773                  dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
6774
6775         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
6776                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
6777                 if (ret)
6778                         goto free_adj;
6779         }
6780
6781         /* Ensure that master link is always the first item in list. */
6782         if (master) {
6783                 ret = sysfs_create_link(&(dev->dev.kobj),
6784                                         &(adj_dev->dev.kobj), "master");
6785                 if (ret)
6786                         goto remove_symlinks;
6787
6788                 list_add_rcu(&adj->list, dev_list);
6789         } else {
6790                 list_add_tail_rcu(&adj->list, dev_list);
6791         }
6792
6793         return 0;
6794
6795 remove_symlinks:
6796         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6797                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6798 free_adj:
6799         kfree(adj);
6800         dev_put(adj_dev);
6801
6802         return ret;
6803 }
6804
6805 static void __netdev_adjacent_dev_remove(struct net_device *dev,
6806                                          struct net_device *adj_dev,
6807                                          u16 ref_nr,
6808                                          struct list_head *dev_list)
6809 {
6810         struct netdev_adjacent *adj;
6811
6812         pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
6813                  dev->name, adj_dev->name, ref_nr);
6814
6815         adj = __netdev_find_adj(adj_dev, dev_list);
6816
6817         if (!adj) {
6818                 pr_err("Adjacency does not exist for device %s from %s\n",
6819                        dev->name, adj_dev->name);
6820                 WARN_ON(1);
6821                 return;
6822         }
6823
6824         if (adj->ref_nr > ref_nr) {
6825                 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
6826                          dev->name, adj_dev->name, ref_nr,
6827                          adj->ref_nr - ref_nr);
6828                 adj->ref_nr -= ref_nr;
6829                 return;
6830         }
6831
6832         if (adj->master)
6833                 sysfs_remove_link(&(dev->dev.kobj), "master");
6834
6835         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
6836                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
6837
6838         list_del_rcu(&adj->list);
6839         pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
6840                  adj_dev->name, dev->name, adj_dev->name);
6841         dev_put(adj_dev);
6842         kfree_rcu(adj, rcu);
6843 }
6844
6845 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
6846                                             struct net_device *upper_dev,
6847                                             struct list_head *up_list,
6848                                             struct list_head *down_list,
6849                                             void *private, bool master)
6850 {
6851         int ret;
6852
6853         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
6854                                            private, master);
6855         if (ret)
6856                 return ret;
6857
6858         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
6859                                            private, false);
6860         if (ret) {
6861                 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
6862                 return ret;
6863         }
6864
6865         return 0;
6866 }
6867
6868 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
6869                                                struct net_device *upper_dev,
6870                                                u16 ref_nr,
6871                                                struct list_head *up_list,
6872                                                struct list_head *down_list)
6873 {
6874         __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
6875         __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
6876 }
6877
6878 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
6879                                                 struct net_device *upper_dev,
6880                                                 void *private, bool master)
6881 {
6882         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
6883                                                 &dev->adj_list.upper,
6884                                                 &upper_dev->adj_list.lower,
6885                                                 private, master);
6886 }
6887
6888 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
6889                                                    struct net_device *upper_dev)
6890 {
6891         __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
6892                                            &dev->adj_list.upper,
6893                                            &upper_dev->adj_list.lower);
6894 }
6895
6896 static int __netdev_upper_dev_link(struct net_device *dev,
6897                                    struct net_device *upper_dev, bool master,
6898                                    void *upper_priv, void *upper_info,
6899                                    struct netlink_ext_ack *extack)
6900 {
6901         struct netdev_notifier_changeupper_info changeupper_info = {
6902                 .info = {
6903                         .dev = dev,
6904                         .extack = extack,
6905                 },
6906                 .upper_dev = upper_dev,
6907                 .master = master,
6908                 .linking = true,
6909                 .upper_info = upper_info,
6910         };
6911         struct net_device *master_dev;
6912         int ret = 0;
6913
6914         ASSERT_RTNL();
6915
6916         if (dev == upper_dev)
6917                 return -EBUSY;
6918
6919         /* To prevent loops, check if dev is not upper device to upper_dev. */
6920         if (netdev_has_upper_dev(upper_dev, dev))
6921                 return -EBUSY;
6922
6923         if (!master) {
6924                 if (netdev_has_upper_dev(dev, upper_dev))
6925                         return -EEXIST;
6926         } else {
6927                 master_dev = netdev_master_upper_dev_get(dev);
6928                 if (master_dev)
6929                         return master_dev == upper_dev ? -EEXIST : -EBUSY;
6930         }
6931
6932         ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
6933                                             &changeupper_info.info);
6934         ret = notifier_to_errno(ret);
6935         if (ret)
6936                 return ret;
6937
6938         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
6939                                                    master);
6940         if (ret)
6941                 return ret;
6942
6943         ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
6944                                             &changeupper_info.info);
6945         ret = notifier_to_errno(ret);
6946         if (ret)
6947                 goto rollback;
6948
6949         return 0;
6950
6951 rollback:
6952         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
6953
6954         return ret;
6955 }
6956
6957 /**
6958  * netdev_upper_dev_link - Add a link to the upper device
6959  * @dev: device
6960  * @upper_dev: new upper device
6961  * @extack: netlink extended ack
6962  *
6963  * Adds a link to device which is upper to this one. The caller must hold
6964  * the RTNL lock. On a failure a negative errno code is returned.
6965  * On success the reference counts are adjusted and the function
6966  * returns zero.
6967  */
6968 int netdev_upper_dev_link(struct net_device *dev,
6969                           struct net_device *upper_dev,
6970                           struct netlink_ext_ack *extack)
6971 {
6972         return __netdev_upper_dev_link(dev, upper_dev, false,
6973                                        NULL, NULL, extack);
6974 }
6975 EXPORT_SYMBOL(netdev_upper_dev_link);
6976
6977 /**
6978  * netdev_master_upper_dev_link - Add a master link to the upper device
6979  * @dev: device
6980  * @upper_dev: new upper device
6981  * @upper_priv: upper device private
6982  * @upper_info: upper info to be passed down via notifier
6983  * @extack: netlink extended ack
6984  *
6985  * Adds a link to device which is upper to this one. In this case, only
6986  * one master upper device can be linked, although other non-master devices
6987  * might be linked as well. The caller must hold the RTNL lock.
6988  * On a failure a negative errno code is returned. On success the reference
6989  * counts are adjusted and the function returns zero.
6990  */
6991 int netdev_master_upper_dev_link(struct net_device *dev,
6992                                  struct net_device *upper_dev,
6993                                  void *upper_priv, void *upper_info,
6994                                  struct netlink_ext_ack *extack)
6995 {
6996         return __netdev_upper_dev_link(dev, upper_dev, true,
6997                                        upper_priv, upper_info, extack);
6998 }
6999 EXPORT_SYMBOL(netdev_master_upper_dev_link);
7000
7001 /**
7002  * netdev_upper_dev_unlink - Removes a link to upper device
7003  * @dev: device
7004  * @upper_dev: new upper device
7005  *
7006  * Removes a link to device which is upper to this one. The caller must hold
7007  * the RTNL lock.
7008  */
7009 void netdev_upper_dev_unlink(struct net_device *dev,
7010                              struct net_device *upper_dev)
7011 {
7012         struct netdev_notifier_changeupper_info changeupper_info = {
7013                 .info = {
7014                         .dev = dev,
7015                 },
7016                 .upper_dev = upper_dev,
7017                 .linking = false,
7018         };
7019
7020         ASSERT_RTNL();
7021
7022         changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7023
7024         call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7025                                       &changeupper_info.info);
7026
7027         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7028
7029         call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7030                                       &changeupper_info.info);
7031 }
7032 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7033
7034 /**
7035  * netdev_bonding_info_change - Dispatch event about slave change
7036  * @dev: device
7037  * @bonding_info: info to dispatch
7038  *
7039  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
7040  * The caller must hold the RTNL lock.
7041  */
7042 void netdev_bonding_info_change(struct net_device *dev,
7043                                 struct netdev_bonding_info *bonding_info)
7044 {
7045         struct netdev_notifier_bonding_info info = {
7046                 .info.dev = dev,
7047         };
7048
7049         memcpy(&info.bonding_info, bonding_info,
7050                sizeof(struct netdev_bonding_info));
7051         call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
7052                                       &info.info);
7053 }
7054 EXPORT_SYMBOL(netdev_bonding_info_change);
7055
7056 static void netdev_adjacent_add_links(struct net_device *dev)
7057 {
7058         struct netdev_adjacent *iter;
7059
7060         struct net *net = dev_net(dev);
7061
7062         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7063                 if (!net_eq(net, dev_net(iter->dev)))
7064                         continue;
7065                 netdev_adjacent_sysfs_add(iter->dev, dev,
7066                                           &iter->dev->adj_list.lower);
7067                 netdev_adjacent_sysfs_add(dev, iter->dev,
7068                                           &dev->adj_list.upper);
7069         }
7070
7071         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7072                 if (!net_eq(net, dev_net(iter->dev)))
7073                         continue;
7074                 netdev_adjacent_sysfs_add(iter->dev, dev,
7075                                           &iter->dev->adj_list.upper);
7076                 netdev_adjacent_sysfs_add(dev, iter->dev,
7077                                           &dev->adj_list.lower);
7078         }
7079 }
7080
7081 static void netdev_adjacent_del_links(struct net_device *dev)
7082 {
7083         struct netdev_adjacent *iter;
7084
7085         struct net *net = dev_net(dev);
7086
7087         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7088                 if (!net_eq(net, dev_net(iter->dev)))
7089                         continue;
7090                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7091                                           &iter->dev->adj_list.lower);
7092                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7093                                           &dev->adj_list.upper);
7094         }
7095
7096         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7097                 if (!net_eq(net, dev_net(iter->dev)))
7098                         continue;
7099                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
7100                                           &iter->dev->adj_list.upper);
7101                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
7102                                           &dev->adj_list.lower);
7103         }
7104 }
7105
7106 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
7107 {
7108         struct netdev_adjacent *iter;
7109
7110         struct net *net = dev_net(dev);
7111
7112         list_for_each_entry(iter, &dev->adj_list.upper, list) {
7113                 if (!net_eq(net, dev_net(iter->dev)))
7114                         continue;
7115                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7116                                           &iter->dev->adj_list.lower);
7117                 netdev_adjacent_sysfs_add(iter->dev, dev,
7118                                           &iter->dev->adj_list.lower);
7119         }
7120
7121         list_for_each_entry(iter, &dev->adj_list.lower, list) {
7122                 if (!net_eq(net, dev_net(iter->dev)))
7123                         continue;
7124                 netdev_adjacent_sysfs_del(iter->dev, oldname,
7125                                           &iter->dev->adj_list.upper);
7126                 netdev_adjacent_sysfs_add(iter->dev, dev,
7127                                           &iter->dev->adj_list.upper);
7128         }
7129 }
7130
7131 void *netdev_lower_dev_get_private(struct net_device *dev,
7132                                    struct net_device *lower_dev)
7133 {
7134         struct netdev_adjacent *lower;
7135
7136         if (!lower_dev)
7137                 return NULL;
7138         lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
7139         if (!lower)
7140                 return NULL;
7141
7142         return lower->private;
7143 }
7144 EXPORT_SYMBOL(netdev_lower_dev_get_private);
7145
7146
7147 int dev_get_nest_level(struct net_device *dev)
7148 {
7149         struct net_device *lower = NULL;
7150         struct list_head *iter;
7151         int max_nest = -1;
7152         int nest;
7153
7154         ASSERT_RTNL();
7155
7156         netdev_for_each_lower_dev(dev, lower, iter) {
7157                 nest = dev_get_nest_level(lower);
7158                 if (max_nest < nest)
7159                         max_nest = nest;
7160         }
7161
7162         return max_nest + 1;
7163 }
7164 EXPORT_SYMBOL(dev_get_nest_level);
7165
7166 /**
7167  * netdev_lower_change - Dispatch event about lower device state change
7168  * @lower_dev: device
7169  * @lower_state_info: state to dispatch
7170  *
7171  * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
7172  * The caller must hold the RTNL lock.
7173  */
7174 void netdev_lower_state_changed(struct net_device *lower_dev,
7175                                 void *lower_state_info)
7176 {
7177         struct netdev_notifier_changelowerstate_info changelowerstate_info = {
7178                 .info.dev = lower_dev,
7179         };
7180
7181         ASSERT_RTNL();
7182         changelowerstate_info.lower_state_info = lower_state_info;
7183         call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
7184                                       &changelowerstate_info.info);
7185 }
7186 EXPORT_SYMBOL(netdev_lower_state_changed);
7187
7188 static void dev_change_rx_flags(struct net_device *dev, int flags)
7189 {
7190         const struct net_device_ops *ops = dev->netdev_ops;
7191
7192         if (ops->ndo_change_rx_flags)
7193                 ops->ndo_change_rx_flags(dev, flags);
7194 }
7195
7196 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
7197 {
7198         unsigned int old_flags = dev->flags;
7199         kuid_t uid;
7200         kgid_t gid;
7201
7202         ASSERT_RTNL();
7203
7204         dev->flags |= IFF_PROMISC;
7205         dev->promiscuity += inc;
7206         if (dev->promiscuity == 0) {
7207                 /*
7208                  * Avoid overflow.
7209                  * If inc causes overflow, untouch promisc and return error.
7210                  */
7211                 if (inc < 0)
7212                         dev->flags &= ~IFF_PROMISC;
7213                 else {
7214                         dev->promiscuity -= inc;
7215                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
7216                                 dev->name);
7217                         return -EOVERFLOW;
7218                 }
7219         }
7220         if (dev->flags != old_flags) {
7221                 pr_info("device %s %s promiscuous mode\n",
7222                         dev->name,
7223                         dev->flags & IFF_PROMISC ? "entered" : "left");
7224                 if (audit_enabled) {
7225                         current_uid_gid(&uid, &gid);
7226                         audit_log(audit_context(), GFP_ATOMIC,
7227                                   AUDIT_ANOM_PROMISCUOUS,
7228                                   "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
7229                                   dev->name, (dev->flags & IFF_PROMISC),
7230                                   (old_flags & IFF_PROMISC),
7231                                   from_kuid(&init_user_ns, audit_get_loginuid(current)),
7232                                   from_kuid(&init_user_ns, uid),
7233                                   from_kgid(&init_user_ns, gid),
7234                                   audit_get_sessionid(current));
7235                 }
7236
7237                 dev_change_rx_flags(dev, IFF_PROMISC);
7238         }
7239         if (notify)
7240                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
7241         return 0;
7242 }
7243
7244 /**
7245  *      dev_set_promiscuity     - update promiscuity count on a device
7246  *      @dev: device
7247  *      @inc: modifier
7248  *
7249  *      Add or remove promiscuity from a device. While the count in the device
7250  *      remains above zero the interface remains promiscuous. Once it hits zero
7251  *      the device reverts back to normal filtering operation. A negative inc
7252  *      value is used to drop promiscuity on the device.
7253  *      Return 0 if successful or a negative errno code on error.
7254  */
7255 int dev_set_promiscuity(struct net_device *dev, int inc)
7256 {
7257         unsigned int old_flags = dev->flags;
7258         int err;
7259
7260         err = __dev_set_promiscuity(dev, inc, true);
7261         if (err < 0)
7262                 return err;
7263         if (dev->flags != old_flags)
7264                 dev_set_rx_mode(dev);
7265         return err;
7266 }
7267 EXPORT_SYMBOL(dev_set_promiscuity);
7268
7269 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
7270 {
7271         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
7272
7273         ASSERT_RTNL();
7274
7275         dev->flags |= IFF_ALLMULTI;
7276         dev->allmulti += inc;
7277         if (dev->allmulti == 0) {
7278                 /*
7279                  * Avoid overflow.
7280                  * If inc causes overflow, untouch allmulti and return error.
7281                  */
7282                 if (inc < 0)
7283                         dev->flags &= ~IFF_ALLMULTI;
7284                 else {
7285                         dev->allmulti -= inc;
7286                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
7287                                 dev->name);
7288                         return -EOVERFLOW;
7289                 }
7290         }
7291         if (dev->flags ^ old_flags) {
7292                 dev_change_rx_flags(dev, IFF_ALLMULTI);
7293                 dev_set_rx_mode(dev);
7294                 if (notify)
7295                         __dev_notify_flags(dev, old_flags,
7296                                            dev->gflags ^ old_gflags);
7297         }
7298         return 0;
7299 }
7300
7301 /**
7302  *      dev_set_allmulti        - update allmulti count on a device
7303  *      @dev: device
7304  *      @inc: modifier
7305  *
7306  *      Add or remove reception of all multicast frames to a device. While the
7307  *      count in the device remains above zero the interface remains listening
7308  *      to all interfaces. Once it hits zero the device reverts back to normal
7309  *      filtering operation. A negative @inc value is used to drop the counter
7310  *      when releasing a resource needing all multicasts.
7311  *      Return 0 if successful or a negative errno code on error.
7312  */
7313
7314 int dev_set_allmulti(struct net_device *dev, int inc)
7315 {
7316         return __dev_set_allmulti(dev, inc, true);
7317 }
7318 EXPORT_SYMBOL(dev_set_allmulti);
7319
7320 /*
7321  *      Upload unicast and multicast address lists to device and
7322  *      configure RX filtering. When the device doesn't support unicast
7323  *      filtering it is put in promiscuous mode while unicast addresses
7324  *      are present.
7325  */
7326 void __dev_set_rx_mode(struct net_device *dev)
7327 {
7328         const struct net_device_ops *ops = dev->netdev_ops;
7329
7330         /* dev_open will call this function so the list will stay sane. */
7331         if (!(dev->flags&IFF_UP))
7332                 return;
7333
7334         if (!netif_device_present(dev))
7335                 return;
7336
7337         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
7338                 /* Unicast addresses changes may only happen under the rtnl,
7339                  * therefore calling __dev_set_promiscuity here is safe.
7340                  */
7341                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
7342                         __dev_set_promiscuity(dev, 1, false);
7343                         dev->uc_promisc = true;
7344                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
7345                         __dev_set_promiscuity(dev, -1, false);
7346                         dev->uc_promisc = false;
7347                 }
7348         }
7349
7350         if (ops->ndo_set_rx_mode)
7351                 ops->ndo_set_rx_mode(dev);
7352 }
7353
7354 void dev_set_rx_mode(struct net_device *dev)
7355 {
7356         netif_addr_lock_bh(dev);
7357         __dev_set_rx_mode(dev);
7358         netif_addr_unlock_bh(dev);
7359 }
7360
7361 /**
7362  *      dev_get_flags - get flags reported to userspace
7363  *      @dev: device
7364  *
7365  *      Get the combination of flag bits exported through APIs to userspace.
7366  */
7367 unsigned int dev_get_flags(const struct net_device *dev)
7368 {
7369         unsigned int flags;
7370
7371         flags = (dev->flags & ~(IFF_PROMISC |
7372                                 IFF_ALLMULTI |
7373                                 IFF_RUNNING |
7374                                 IFF_LOWER_UP |
7375                                 IFF_DORMANT)) |
7376                 (dev->gflags & (IFF_PROMISC |
7377                                 IFF_ALLMULTI));
7378
7379         if (netif_running(dev)) {
7380                 if (netif_oper_up(dev))
7381                         flags |= IFF_RUNNING;
7382                 if (netif_carrier_ok(dev))
7383                         flags |= IFF_LOWER_UP;
7384                 if (netif_dormant(dev))
7385                         flags |= IFF_DORMANT;
7386         }
7387
7388         return flags;
7389 }
7390 EXPORT_SYMBOL(dev_get_flags);
7391
7392 int __dev_change_flags(struct net_device *dev, unsigned int flags)
7393 {
7394         unsigned int old_flags = dev->flags;
7395         int ret;
7396
7397         ASSERT_RTNL();
7398
7399         /*
7400          *      Set the flags on our device.
7401          */
7402
7403         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
7404                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
7405                                IFF_AUTOMEDIA)) |
7406                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
7407                                     IFF_ALLMULTI));
7408
7409         /*
7410          *      Load in the correct multicast list now the flags have changed.
7411          */
7412
7413         if ((old_flags ^ flags) & IFF_MULTICAST)
7414                 dev_change_rx_flags(dev, IFF_MULTICAST);
7415
7416         dev_set_rx_mode(dev);
7417
7418         /*
7419          *      Have we downed the interface. We handle IFF_UP ourselves
7420          *      according to user attempts to set it, rather than blindly
7421          *      setting it.
7422          */
7423
7424         ret = 0;
7425         if ((old_flags ^ flags) & IFF_UP) {
7426                 if (old_flags & IFF_UP)
7427                         __dev_close(dev);
7428                 else
7429                         ret = __dev_open(dev);
7430         }
7431
7432         if ((flags ^ dev->gflags) & IFF_PROMISC) {
7433                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
7434                 unsigned int old_flags = dev->flags;
7435
7436                 dev->gflags ^= IFF_PROMISC;
7437
7438                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
7439                         if (dev->flags != old_flags)
7440                                 dev_set_rx_mode(dev);
7441         }
7442
7443         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
7444          * is important. Some (broken) drivers set IFF_PROMISC, when
7445          * IFF_ALLMULTI is requested not asking us and not reporting.
7446          */
7447         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
7448                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
7449
7450                 dev->gflags ^= IFF_ALLMULTI;
7451                 __dev_set_allmulti(dev, inc, false);
7452         }
7453
7454         return ret;
7455 }
7456
7457 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
7458                         unsigned int gchanges)
7459 {
7460         unsigned int changes = dev->flags ^ old_flags;
7461
7462         if (gchanges)
7463                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
7464
7465         if (changes & IFF_UP) {
7466                 if (dev->flags & IFF_UP)
7467                         call_netdevice_notifiers(NETDEV_UP, dev);
7468                 else
7469                         call_netdevice_notifiers(NETDEV_DOWN, dev);
7470         }
7471
7472         if (dev->flags & IFF_UP &&
7473             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
7474                 struct netdev_notifier_change_info change_info = {
7475                         .info = {
7476                                 .dev = dev,
7477                         },
7478                         .flags_changed = changes,
7479                 };
7480
7481                 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
7482         }
7483 }
7484
7485 /**
7486  *      dev_change_flags - change device settings
7487  *      @dev: device
7488  *      @flags: device state flags
7489  *
7490  *      Change settings on device based state flags. The flags are
7491  *      in the userspace exported format.
7492  */
7493 int dev_change_flags(struct net_device *dev, unsigned int flags)
7494 {
7495         int ret;
7496         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
7497
7498         ret = __dev_change_flags(dev, flags);
7499         if (ret < 0)
7500                 return ret;
7501
7502         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
7503         __dev_notify_flags(dev, old_flags, changes);
7504         return ret;
7505 }
7506 EXPORT_SYMBOL(dev_change_flags);
7507
7508 int __dev_set_mtu(struct net_device *dev, int new_mtu)
7509 {
7510         const struct net_device_ops *ops = dev->netdev_ops;
7511
7512         if (ops->ndo_change_mtu)
7513                 return ops->ndo_change_mtu(dev, new_mtu);
7514
7515         dev->mtu = new_mtu;
7516         return 0;
7517 }
7518 EXPORT_SYMBOL(__dev_set_mtu);
7519
7520 /**
7521  *      dev_set_mtu - Change maximum transfer unit
7522  *      @dev: device
7523  *      @new_mtu: new transfer unit
7524  *
7525  *      Change the maximum transfer size of the network device.
7526  */
7527 int dev_set_mtu(struct net_device *dev, int new_mtu)
7528 {
7529         int err, orig_mtu;
7530
7531         if (new_mtu == dev->mtu)
7532                 return 0;
7533
7534         /* MTU must be positive, and in range */
7535         if (new_mtu < 0 || new_mtu < dev->min_mtu) {
7536                 net_err_ratelimited("%s: Invalid MTU %d requested, hw min %d\n",
7537                                     dev->name, new_mtu, dev->min_mtu);
7538                 return -EINVAL;
7539         }
7540
7541         if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
7542                 net_err_ratelimited("%s: Invalid MTU %d requested, hw max %d\n",
7543                                     dev->name, new_mtu, dev->max_mtu);
7544                 return -EINVAL;
7545         }
7546
7547         if (!netif_device_present(dev))
7548                 return -ENODEV;
7549
7550         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
7551         err = notifier_to_errno(err);
7552         if (err)
7553                 return err;
7554
7555         orig_mtu = dev->mtu;
7556         err = __dev_set_mtu(dev, new_mtu);
7557
7558         if (!err) {
7559                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7560                 err = notifier_to_errno(err);
7561                 if (err) {
7562                         /* setting mtu back and notifying everyone again,
7563                          * so that they have a chance to revert changes.
7564                          */
7565                         __dev_set_mtu(dev, orig_mtu);
7566                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
7567                 }
7568         }
7569         return err;
7570 }
7571 EXPORT_SYMBOL(dev_set_mtu);
7572
7573 /**
7574  *      dev_change_tx_queue_len - Change TX queue length of a netdevice
7575  *      @dev: device
7576  *      @new_len: new tx queue length
7577  */
7578 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
7579 {
7580         unsigned int orig_len = dev->tx_queue_len;
7581         int res;
7582
7583         if (new_len != (unsigned int)new_len)
7584                 return -ERANGE;
7585
7586         if (new_len != orig_len) {
7587                 dev->tx_queue_len = new_len;
7588                 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
7589                 res = notifier_to_errno(res);
7590                 if (res) {
7591                         netdev_err(dev,
7592                                    "refused to change device tx_queue_len\n");
7593                         dev->tx_queue_len = orig_len;
7594                         return res;
7595                 }
7596                 return dev_qdisc_change_tx_queue_len(dev);
7597         }
7598
7599         return 0;
7600 }
7601
7602 /**
7603  *      dev_set_group - Change group this device belongs to
7604  *      @dev: device
7605  *      @new_group: group this device should belong to
7606  */
7607 void dev_set_group(struct net_device *dev, int new_group)
7608 {
7609         dev->group = new_group;
7610 }
7611 EXPORT_SYMBOL(dev_set_group);
7612
7613 /**
7614  *      dev_set_mac_address - Change Media Access Control Address
7615  *      @dev: device
7616  *      @sa: new address
7617  *
7618  *      Change the hardware (MAC) address of the device
7619  */
7620 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
7621 {
7622         const struct net_device_ops *ops = dev->netdev_ops;
7623         int err;
7624
7625         if (!ops->ndo_set_mac_address)
7626                 return -EOPNOTSUPP;
7627         if (sa->sa_family != dev->type)
7628                 return -EINVAL;
7629         if (!netif_device_present(dev))
7630                 return -ENODEV;
7631         err = ops->ndo_set_mac_address(dev, sa);
7632         if (err)
7633                 return err;
7634         dev->addr_assign_type = NET_ADDR_SET;
7635         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7636         add_device_randomness(dev->dev_addr, dev->addr_len);
7637         return 0;
7638 }
7639 EXPORT_SYMBOL(dev_set_mac_address);
7640
7641 /**
7642  *      dev_change_carrier - Change device carrier
7643  *      @dev: device
7644  *      @new_carrier: new value
7645  *
7646  *      Change device carrier
7647  */
7648 int dev_change_carrier(struct net_device *dev, bool new_carrier)
7649 {
7650         const struct net_device_ops *ops = dev->netdev_ops;
7651
7652         if (!ops->ndo_change_carrier)
7653                 return -EOPNOTSUPP;
7654         if (!netif_device_present(dev))
7655                 return -ENODEV;
7656         return ops->ndo_change_carrier(dev, new_carrier);
7657 }
7658 EXPORT_SYMBOL(dev_change_carrier);
7659
7660 /**
7661  *      dev_get_phys_port_id - Get device physical port ID
7662  *      @dev: device
7663  *      @ppid: port ID
7664  *
7665  *      Get device physical port ID
7666  */
7667 int dev_get_phys_port_id(struct net_device *dev,
7668                          struct netdev_phys_item_id *ppid)
7669 {
7670         const struct net_device_ops *ops = dev->netdev_ops;
7671
7672         if (!ops->ndo_get_phys_port_id)
7673                 return -EOPNOTSUPP;
7674         return ops->ndo_get_phys_port_id(dev, ppid);
7675 }
7676 EXPORT_SYMBOL(dev_get_phys_port_id);
7677
7678 /**
7679  *      dev_get_phys_port_name - Get device physical port name
7680  *      @dev: device
7681  *      @name: port name
7682  *      @len: limit of bytes to copy to name
7683  *
7684  *      Get device physical port name
7685  */
7686 int dev_get_phys_port_name(struct net_device *dev,
7687                            char *name, size_t len)
7688 {
7689         const struct net_device_ops *ops = dev->netdev_ops;
7690
7691         if (!ops->ndo_get_phys_port_name)
7692                 return -EOPNOTSUPP;
7693         return ops->ndo_get_phys_port_name(dev, name, len);
7694 }
7695 EXPORT_SYMBOL(dev_get_phys_port_name);
7696
7697 /**
7698  *      dev_change_proto_down - update protocol port state information
7699  *      @dev: device
7700  *      @proto_down: new value
7701  *
7702  *      This info can be used by switch drivers to set the phys state of the
7703  *      port.
7704  */
7705 int dev_change_proto_down(struct net_device *dev, bool proto_down)
7706 {
7707         const struct net_device_ops *ops = dev->netdev_ops;
7708
7709         if (!ops->ndo_change_proto_down)
7710                 return -EOPNOTSUPP;
7711         if (!netif_device_present(dev))
7712                 return -ENODEV;
7713         return ops->ndo_change_proto_down(dev, proto_down);
7714 }
7715 EXPORT_SYMBOL(dev_change_proto_down);
7716
7717 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
7718                     enum bpf_netdev_command cmd)
7719 {
7720         struct netdev_bpf xdp;
7721
7722         if (!bpf_op)
7723                 return 0;
7724
7725         memset(&xdp, 0, sizeof(xdp));
7726         xdp.command = cmd;
7727
7728         /* Query must always succeed. */
7729         WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG);
7730
7731         return xdp.prog_id;
7732 }
7733
7734 static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
7735                            struct netlink_ext_ack *extack, u32 flags,
7736                            struct bpf_prog *prog)
7737 {
7738         struct netdev_bpf xdp;
7739
7740         memset(&xdp, 0, sizeof(xdp));
7741         if (flags & XDP_FLAGS_HW_MODE)
7742                 xdp.command = XDP_SETUP_PROG_HW;
7743         else
7744                 xdp.command = XDP_SETUP_PROG;
7745         xdp.extack = extack;
7746         xdp.flags = flags;
7747         xdp.prog = prog;
7748
7749         return bpf_op(dev, &xdp);
7750 }
7751
7752 static void dev_xdp_uninstall(struct net_device *dev)
7753 {
7754         struct netdev_bpf xdp;
7755         bpf_op_t ndo_bpf;
7756
7757         /* Remove generic XDP */
7758         WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
7759
7760         /* Remove from the driver */
7761         ndo_bpf = dev->netdev_ops->ndo_bpf;
7762         if (!ndo_bpf)
7763                 return;
7764
7765         memset(&xdp, 0, sizeof(xdp));
7766         xdp.command = XDP_QUERY_PROG;
7767         WARN_ON(ndo_bpf(dev, &xdp));
7768         if (xdp.prog_id)
7769                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7770                                         NULL));
7771
7772         /* Remove HW offload */
7773         memset(&xdp, 0, sizeof(xdp));
7774         xdp.command = XDP_QUERY_PROG_HW;
7775         if (!ndo_bpf(dev, &xdp) && xdp.prog_id)
7776                 WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags,
7777                                         NULL));
7778 }
7779
7780 /**
7781  *      dev_change_xdp_fd - set or clear a bpf program for a device rx path
7782  *      @dev: device
7783  *      @extack: netlink extended ack
7784  *      @fd: new program fd or negative value to clear
7785  *      @flags: xdp-related flags
7786  *
7787  *      Set or clear a bpf program for a device
7788  */
7789 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
7790                       int fd, u32 flags)
7791 {
7792         const struct net_device_ops *ops = dev->netdev_ops;
7793         enum bpf_netdev_command query;
7794         struct bpf_prog *prog = NULL;
7795         bpf_op_t bpf_op, bpf_chk;
7796         int err;
7797
7798         ASSERT_RTNL();
7799
7800         query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG;
7801
7802         bpf_op = bpf_chk = ops->ndo_bpf;
7803         if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE)))
7804                 return -EOPNOTSUPP;
7805         if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE))
7806                 bpf_op = generic_xdp_install;
7807         if (bpf_op == bpf_chk)
7808                 bpf_chk = generic_xdp_install;
7809
7810         if (fd >= 0) {
7811                 if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) ||
7812                     __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW))
7813                         return -EEXIST;
7814                 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
7815                     __dev_xdp_query(dev, bpf_op, query))
7816                         return -EBUSY;
7817
7818                 prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
7819                                              bpf_op == ops->ndo_bpf);
7820                 if (IS_ERR(prog))
7821                         return PTR_ERR(prog);
7822
7823                 if (!(flags & XDP_FLAGS_HW_MODE) &&
7824                     bpf_prog_is_dev_bound(prog->aux)) {
7825                         NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
7826                         bpf_prog_put(prog);
7827                         return -EINVAL;
7828                 }
7829         }
7830
7831         err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
7832         if (err < 0 && prog)
7833                 bpf_prog_put(prog);
7834
7835         return err;
7836 }
7837
7838 /**
7839  *      dev_new_index   -       allocate an ifindex
7840  *      @net: the applicable net namespace
7841  *
7842  *      Returns a suitable unique value for a new device interface
7843  *      number.  The caller must hold the rtnl semaphore or the
7844  *      dev_base_lock to be sure it remains unique.
7845  */
7846 static int dev_new_index(struct net *net)
7847 {
7848         int ifindex = net->ifindex;
7849
7850         for (;;) {
7851                 if (++ifindex <= 0)
7852                         ifindex = 1;
7853                 if (!__dev_get_by_index(net, ifindex))
7854                         return net->ifindex = ifindex;
7855         }
7856 }
7857
7858 /* Delayed registration/unregisteration */
7859 static LIST_HEAD(net_todo_list);
7860 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
7861
7862 static void net_set_todo(struct net_device *dev)
7863 {
7864         list_add_tail(&dev->todo_list, &net_todo_list);
7865         dev_net(dev)->dev_unreg_count++;
7866 }
7867
7868 static void rollback_registered_many(struct list_head *head)
7869 {
7870         struct net_device *dev, *tmp;
7871         LIST_HEAD(close_head);
7872
7873         BUG_ON(dev_boot_phase);
7874         ASSERT_RTNL();
7875
7876         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
7877                 /* Some devices call without registering
7878                  * for initialization unwind. Remove those
7879                  * devices and proceed with the remaining.
7880                  */
7881                 if (dev->reg_state == NETREG_UNINITIALIZED) {
7882                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
7883                                  dev->name, dev);
7884
7885                         WARN_ON(1);
7886                         list_del(&dev->unreg_list);
7887                         continue;
7888                 }
7889                 dev->dismantle = true;
7890                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
7891         }
7892
7893         /* If device is running, close it first. */
7894         list_for_each_entry(dev, head, unreg_list)
7895                 list_add_tail(&dev->close_list, &close_head);
7896         dev_close_many(&close_head, true);
7897
7898         list_for_each_entry(dev, head, unreg_list) {
7899                 /* And unlink it from device chain. */
7900                 unlist_netdevice(dev);
7901
7902                 dev->reg_state = NETREG_UNREGISTERING;
7903         }
7904         flush_all_backlogs();
7905
7906         synchronize_net();
7907
7908         list_for_each_entry(dev, head, unreg_list) {
7909                 struct sk_buff *skb = NULL;
7910
7911                 /* Shutdown queueing discipline. */
7912                 dev_shutdown(dev);
7913
7914                 dev_xdp_uninstall(dev);
7915
7916                 /* Notify protocols, that we are about to destroy
7917                  * this device. They should clean all the things.
7918                  */
7919                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7920
7921                 if (!dev->rtnl_link_ops ||
7922                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7923                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
7924                                                      GFP_KERNEL, NULL, 0);
7925
7926                 /*
7927                  *      Flush the unicast and multicast chains
7928                  */
7929                 dev_uc_flush(dev);
7930                 dev_mc_flush(dev);
7931
7932                 if (dev->netdev_ops->ndo_uninit)
7933                         dev->netdev_ops->ndo_uninit(dev);
7934
7935                 if (skb)
7936                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
7937
7938                 /* Notifier chain MUST detach us all upper devices. */
7939                 WARN_ON(netdev_has_any_upper_dev(dev));
7940                 WARN_ON(netdev_has_any_lower_dev(dev));
7941
7942                 /* Remove entries from kobject tree */
7943                 netdev_unregister_kobject(dev);
7944 #ifdef CONFIG_XPS
7945                 /* Remove XPS queueing entries */
7946                 netif_reset_xps_queues_gt(dev, 0);
7947 #endif
7948         }
7949
7950         synchronize_net();
7951
7952         list_for_each_entry(dev, head, unreg_list)
7953                 dev_put(dev);
7954 }
7955
7956 static void rollback_registered(struct net_device *dev)
7957 {
7958         LIST_HEAD(single);
7959
7960         list_add(&dev->unreg_list, &single);
7961         rollback_registered_many(&single);
7962         list_del(&single);
7963 }
7964
7965 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
7966         struct net_device *upper, netdev_features_t features)
7967 {
7968         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7969         netdev_features_t feature;
7970         int feature_bit;
7971
7972         for_each_netdev_feature(&upper_disables, feature_bit) {
7973                 feature = __NETIF_F_BIT(feature_bit);
7974                 if (!(upper->wanted_features & feature)
7975                     && (features & feature)) {
7976                         netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
7977                                    &feature, upper->name);
7978                         features &= ~feature;
7979                 }
7980         }
7981
7982         return features;
7983 }
7984
7985 static void netdev_sync_lower_features(struct net_device *upper,
7986         struct net_device *lower, netdev_features_t features)
7987 {
7988         netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
7989         netdev_features_t feature;
7990         int feature_bit;
7991
7992         for_each_netdev_feature(&upper_disables, feature_bit) {
7993                 feature = __NETIF_F_BIT(feature_bit);
7994                 if (!(features & feature) && (lower->features & feature)) {
7995                         netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
7996                                    &feature, lower->name);
7997                         lower->wanted_features &= ~feature;
7998                         netdev_update_features(lower);
7999
8000                         if (unlikely(lower->features & feature))
8001                                 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
8002                                             &feature, lower->name);
8003                 }
8004         }
8005 }
8006
8007 static netdev_features_t netdev_fix_features(struct net_device *dev,
8008         netdev_features_t features)
8009 {
8010         /* Fix illegal checksum combinations */
8011         if ((features & NETIF_F_HW_CSUM) &&
8012             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
8013                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
8014                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
8015         }
8016
8017         /* TSO requires that SG is present as well. */
8018         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
8019                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
8020                 features &= ~NETIF_F_ALL_TSO;
8021         }
8022
8023         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
8024                                         !(features & NETIF_F_IP_CSUM)) {
8025                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
8026                 features &= ~NETIF_F_TSO;
8027                 features &= ~NETIF_F_TSO_ECN;
8028         }
8029
8030         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
8031                                          !(features & NETIF_F_IPV6_CSUM)) {
8032                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
8033                 features &= ~NETIF_F_TSO6;
8034         }
8035
8036         /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
8037         if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
8038                 features &= ~NETIF_F_TSO_MANGLEID;
8039
8040         /* TSO ECN requires that TSO is present as well. */
8041         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
8042                 features &= ~NETIF_F_TSO_ECN;
8043
8044         /* Software GSO depends on SG. */
8045         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
8046                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
8047                 features &= ~NETIF_F_GSO;
8048         }
8049
8050         /* GSO partial features require GSO partial be set */
8051         if ((features & dev->gso_partial_features) &&
8052             !(features & NETIF_F_GSO_PARTIAL)) {
8053                 netdev_dbg(dev,
8054                            "Dropping partially supported GSO features since no GSO partial.\n");
8055                 features &= ~dev->gso_partial_features;
8056         }
8057
8058         if (!(features & NETIF_F_RXCSUM)) {
8059                 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
8060                  * successfully merged by hardware must also have the
8061                  * checksum verified by hardware.  If the user does not
8062                  * want to enable RXCSUM, logically, we should disable GRO_HW.
8063                  */
8064                 if (features & NETIF_F_GRO_HW) {
8065                         netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
8066                         features &= ~NETIF_F_GRO_HW;
8067                 }
8068         }
8069
8070         /* LRO/HW-GRO features cannot be combined with RX-FCS */
8071         if (features & NETIF_F_RXFCS) {
8072                 if (features & NETIF_F_LRO) {
8073                         netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
8074                         features &= ~NETIF_F_LRO;
8075                 }
8076
8077                 if (features & NETIF_F_GRO_HW) {
8078                         netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
8079                         features &= ~NETIF_F_GRO_HW;
8080                 }
8081         }
8082
8083         return features;
8084 }
8085
8086 int __netdev_update_features(struct net_device *dev)
8087 {
8088         struct net_device *upper, *lower;
8089         netdev_features_t features;
8090         struct list_head *iter;
8091         int err = -1;
8092
8093         ASSERT_RTNL();
8094
8095         features = netdev_get_wanted_features(dev);
8096
8097         if (dev->netdev_ops->ndo_fix_features)
8098                 features = dev->netdev_ops->ndo_fix_features(dev, features);
8099
8100         /* driver might be less strict about feature dependencies */
8101         features = netdev_fix_features(dev, features);
8102
8103         /* some features can't be enabled if they're off an an upper device */
8104         netdev_for_each_upper_dev_rcu(dev, upper, iter)
8105                 features = netdev_sync_upper_features(dev, upper, features);
8106
8107         if (dev->features == features)
8108                 goto sync_lower;
8109
8110         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
8111                 &dev->features, &features);
8112
8113         if (dev->netdev_ops->ndo_set_features)
8114                 err = dev->netdev_ops->ndo_set_features(dev, features);
8115         else
8116                 err = 0;
8117
8118         if (unlikely(err < 0)) {
8119                 netdev_err(dev,
8120                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
8121                         err, &features, &dev->features);
8122                 /* return non-0 since some features might have changed and
8123                  * it's better to fire a spurious notification than miss it
8124                  */
8125                 return -1;
8126         }
8127
8128 sync_lower:
8129         /* some features must be disabled on lower devices when disabled
8130          * on an upper device (think: bonding master or bridge)
8131          */
8132         netdev_for_each_lower_dev(dev, lower, iter)
8133                 netdev_sync_lower_features(dev, lower, features);
8134
8135         if (!err) {
8136                 netdev_features_t diff = features ^ dev->features;
8137
8138                 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
8139                         /* udp_tunnel_{get,drop}_rx_info both need
8140                          * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
8141                          * device, or they won't do anything.
8142                          * Thus we need to update dev->features
8143                          * *before* calling udp_tunnel_get_rx_info,
8144                          * but *after* calling udp_tunnel_drop_rx_info.
8145                          */
8146                         if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
8147                                 dev->features = features;
8148                                 udp_tunnel_get_rx_info(dev);
8149                         } else {
8150                                 udp_tunnel_drop_rx_info(dev);
8151                         }
8152                 }
8153
8154                 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
8155                         if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
8156                                 dev->features = features;
8157                                 err |= vlan_get_rx_ctag_filter_info(dev);
8158                         } else {
8159                                 vlan_drop_rx_ctag_filter_info(dev);
8160                         }
8161                 }
8162
8163                 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
8164                         if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
8165                                 dev->features = features;
8166                                 err |= vlan_get_rx_stag_filter_info(dev);
8167                         } else {
8168                                 vlan_drop_rx_stag_filter_info(dev);
8169                         }
8170                 }
8171
8172                 dev->features = features;
8173         }
8174
8175         return err < 0 ? 0 : 1;
8176 }
8177
8178 /**
8179  *      netdev_update_features - recalculate device features
8180  *      @dev: the device to check
8181  *
8182  *      Recalculate dev->features set and send notifications if it
8183  *      has changed. Should be called after driver or hardware dependent
8184  *      conditions might have changed that influence the features.
8185  */
8186 void netdev_update_features(struct net_device *dev)
8187 {
8188         if (__netdev_update_features(dev))
8189                 netdev_features_change(dev);
8190 }
8191 EXPORT_SYMBOL(netdev_update_features);
8192
8193 /**
8194  *      netdev_change_features - recalculate device features
8195  *      @dev: the device to check
8196  *
8197  *      Recalculate dev->features set and send notifications even
8198  *      if they have not changed. Should be called instead of
8199  *      netdev_update_features() if also dev->vlan_features might
8200  *      have changed to allow the changes to be propagated to stacked
8201  *      VLAN devices.
8202  */
8203 void netdev_change_features(struct net_device *dev)
8204 {
8205         __netdev_update_features(dev);
8206         netdev_features_change(dev);
8207 }
8208 EXPORT_SYMBOL(netdev_change_features);
8209
8210 /**
8211  *      netif_stacked_transfer_operstate -      transfer operstate
8212  *      @rootdev: the root or lower level device to transfer state from
8213  *      @dev: the device to transfer operstate to
8214  *
8215  *      Transfer operational state from root to device. This is normally
8216  *      called when a stacking relationship exists between the root
8217  *      device and the device(a leaf device).
8218  */
8219 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
8220                                         struct net_device *dev)
8221 {
8222         if (rootdev->operstate == IF_OPER_DORMANT)
8223                 netif_dormant_on(dev);
8224         else
8225                 netif_dormant_off(dev);
8226
8227         if (netif_carrier_ok(rootdev))
8228                 netif_carrier_on(dev);
8229         else
8230                 netif_carrier_off(dev);
8231 }
8232 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
8233
8234 static int netif_alloc_rx_queues(struct net_device *dev)
8235 {
8236         unsigned int i, count = dev->num_rx_queues;
8237         struct netdev_rx_queue *rx;
8238         size_t sz = count * sizeof(*rx);
8239         int err = 0;
8240
8241         BUG_ON(count < 1);
8242
8243         rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8244         if (!rx)
8245                 return -ENOMEM;
8246
8247         dev->_rx = rx;
8248
8249         for (i = 0; i < count; i++) {
8250                 rx[i].dev = dev;
8251
8252                 /* XDP RX-queue setup */
8253                 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i);
8254                 if (err < 0)
8255                         goto err_rxq_info;
8256         }
8257         return 0;
8258
8259 err_rxq_info:
8260         /* Rollback successful reg's and free other resources */
8261         while (i--)
8262                 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
8263         kvfree(dev->_rx);
8264         dev->_rx = NULL;
8265         return err;
8266 }
8267
8268 static void netif_free_rx_queues(struct net_device *dev)
8269 {
8270         unsigned int i, count = dev->num_rx_queues;
8271
8272         /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
8273         if (!dev->_rx)
8274                 return;
8275
8276         for (i = 0; i < count; i++)
8277                 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
8278
8279         kvfree(dev->_rx);
8280 }
8281
8282 static void netdev_init_one_queue(struct net_device *dev,
8283                                   struct netdev_queue *queue, void *_unused)
8284 {
8285         /* Initialize queue lock */
8286         spin_lock_init(&queue->_xmit_lock);
8287         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
8288         queue->xmit_lock_owner = -1;
8289         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
8290         queue->dev = dev;
8291 #ifdef CONFIG_BQL
8292         dql_init(&queue->dql, HZ);
8293 #endif
8294 }
8295
8296 static void netif_free_tx_queues(struct net_device *dev)
8297 {
8298         kvfree(dev->_tx);
8299 }
8300
8301 static int netif_alloc_netdev_queues(struct net_device *dev)
8302 {
8303         unsigned int count = dev->num_tx_queues;
8304         struct netdev_queue *tx;
8305         size_t sz = count * sizeof(*tx);
8306
8307         if (count < 1 || count > 0xffff)
8308                 return -EINVAL;
8309
8310         tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8311         if (!tx)
8312                 return -ENOMEM;
8313
8314         dev->_tx = tx;
8315
8316         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
8317         spin_lock_init(&dev->tx_global_lock);
8318
8319         return 0;
8320 }
8321
8322 void netif_tx_stop_all_queues(struct net_device *dev)
8323 {
8324         unsigned int i;
8325
8326         for (i = 0; i < dev->num_tx_queues; i++) {
8327                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
8328
8329                 netif_tx_stop_queue(txq);
8330         }
8331 }
8332 EXPORT_SYMBOL(netif_tx_stop_all_queues);
8333
8334 /**
8335  *      register_netdevice      - register a network device
8336  *      @dev: device to register
8337  *
8338  *      Take a completed network device structure and add it to the kernel
8339  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8340  *      chain. 0 is returned on success. A negative errno code is returned
8341  *      on a failure to set up the device, or if the name is a duplicate.
8342  *
8343  *      Callers must hold the rtnl semaphore. You may want
8344  *      register_netdev() instead of this.
8345  *
8346  *      BUGS:
8347  *      The locking appears insufficient to guarantee two parallel registers
8348  *      will not get the same name.
8349  */
8350
8351 int register_netdevice(struct net_device *dev)
8352 {
8353         int ret;
8354         struct net *net = dev_net(dev);
8355
8356         BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
8357                      NETDEV_FEATURE_COUNT);
8358         BUG_ON(dev_boot_phase);
8359         ASSERT_RTNL();
8360
8361         might_sleep();
8362
8363         /* When net_device's are persistent, this will be fatal. */
8364         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
8365         BUG_ON(!net);
8366
8367         spin_lock_init(&dev->addr_list_lock);
8368         netdev_set_addr_lockdep_class(dev);
8369
8370         ret = dev_get_valid_name(net, dev, dev->name);
8371         if (ret < 0)
8372                 goto out;
8373
8374         /* Init, if this function is available */
8375         if (dev->netdev_ops->ndo_init) {
8376                 ret = dev->netdev_ops->ndo_init(dev);
8377                 if (ret) {
8378                         if (ret > 0)
8379                                 ret = -EIO;
8380                         goto out;
8381                 }
8382         }
8383
8384         if (((dev->hw_features | dev->features) &
8385              NETIF_F_HW_VLAN_CTAG_FILTER) &&
8386             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
8387              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
8388                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
8389                 ret = -EINVAL;
8390                 goto err_uninit;
8391         }
8392
8393         ret = -EBUSY;
8394         if (!dev->ifindex)
8395                 dev->ifindex = dev_new_index(net);
8396         else if (__dev_get_by_index(net, dev->ifindex))
8397                 goto err_uninit;
8398
8399         /* Transfer changeable features to wanted_features and enable
8400          * software offloads (GSO and GRO).
8401          */
8402         dev->hw_features |= NETIF_F_SOFT_FEATURES;
8403         dev->features |= NETIF_F_SOFT_FEATURES;
8404
8405         if (dev->netdev_ops->ndo_udp_tunnel_add) {
8406                 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8407                 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
8408         }
8409
8410         dev->wanted_features = dev->features & dev->hw_features;
8411
8412         if (!(dev->flags & IFF_LOOPBACK))
8413                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
8414
8415         /* If IPv4 TCP segmentation offload is supported we should also
8416          * allow the device to enable segmenting the frame with the option
8417          * of ignoring a static IP ID value.  This doesn't enable the
8418          * feature itself but allows the user to enable it later.
8419          */
8420         if (dev->hw_features & NETIF_F_TSO)
8421                 dev->hw_features |= NETIF_F_TSO_MANGLEID;
8422         if (dev->vlan_features & NETIF_F_TSO)
8423                 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
8424         if (dev->mpls_features & NETIF_F_TSO)
8425                 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
8426         if (dev->hw_enc_features & NETIF_F_TSO)
8427                 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
8428
8429         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
8430          */
8431         dev->vlan_features |= NETIF_F_HIGHDMA;
8432
8433         /* Make NETIF_F_SG inheritable to tunnel devices.
8434          */
8435         dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
8436
8437         /* Make NETIF_F_SG inheritable to MPLS.
8438          */
8439         dev->mpls_features |= NETIF_F_SG;
8440
8441         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
8442         ret = notifier_to_errno(ret);
8443         if (ret)
8444                 goto err_uninit;
8445
8446         ret = netdev_register_kobject(dev);
8447         if (ret)
8448                 goto err_uninit;
8449         dev->reg_state = NETREG_REGISTERED;
8450
8451         __netdev_update_features(dev);
8452
8453         /*
8454          *      Default initial state at registry is that the
8455          *      device is present.
8456          */
8457
8458         set_bit(__LINK_STATE_PRESENT, &dev->state);
8459
8460         linkwatch_init_dev(dev);
8461
8462         dev_init_scheduler(dev);
8463         dev_hold(dev);
8464         list_netdevice(dev);
8465         add_device_randomness(dev->dev_addr, dev->addr_len);
8466
8467         /* If the device has permanent device address, driver should
8468          * set dev_addr and also addr_assign_type should be set to
8469          * NET_ADDR_PERM (default value).
8470          */
8471         if (dev->addr_assign_type == NET_ADDR_PERM)
8472                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
8473
8474         /* Notify protocols, that a new device appeared. */
8475         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
8476         ret = notifier_to_errno(ret);
8477         if (ret) {
8478                 rollback_registered(dev);
8479                 dev->reg_state = NETREG_UNREGISTERED;
8480         }
8481         /*
8482          *      Prevent userspace races by waiting until the network
8483          *      device is fully setup before sending notifications.
8484          */
8485         if (!dev->rtnl_link_ops ||
8486             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
8487                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
8488
8489 out:
8490         return ret;
8491
8492 err_uninit:
8493         if (dev->netdev_ops->ndo_uninit)
8494                 dev->netdev_ops->ndo_uninit(dev);
8495         if (dev->priv_destructor)
8496                 dev->priv_destructor(dev);
8497         goto out;
8498 }
8499 EXPORT_SYMBOL(register_netdevice);
8500
8501 /**
8502  *      init_dummy_netdev       - init a dummy network device for NAPI
8503  *      @dev: device to init
8504  *
8505  *      This takes a network device structure and initialize the minimum
8506  *      amount of fields so it can be used to schedule NAPI polls without
8507  *      registering a full blown interface. This is to be used by drivers
8508  *      that need to tie several hardware interfaces to a single NAPI
8509  *      poll scheduler due to HW limitations.
8510  */
8511 int init_dummy_netdev(struct net_device *dev)
8512 {
8513         /* Clear everything. Note we don't initialize spinlocks
8514          * are they aren't supposed to be taken by any of the
8515          * NAPI code and this dummy netdev is supposed to be
8516          * only ever used for NAPI polls
8517          */
8518         memset(dev, 0, sizeof(struct net_device));
8519
8520         /* make sure we BUG if trying to hit standard
8521          * register/unregister code path
8522          */
8523         dev->reg_state = NETREG_DUMMY;
8524
8525         /* NAPI wants this */
8526         INIT_LIST_HEAD(&dev->napi_list);
8527
8528         /* a dummy interface is started by default */
8529         set_bit(__LINK_STATE_PRESENT, &dev->state);
8530         set_bit(__LINK_STATE_START, &dev->state);
8531
8532         /* Note : We dont allocate pcpu_refcnt for dummy devices,
8533          * because users of this 'device' dont need to change
8534          * its refcount.
8535          */
8536
8537         return 0;
8538 }
8539 EXPORT_SYMBOL_GPL(init_dummy_netdev);
8540
8541
8542 /**
8543  *      register_netdev - register a network device
8544  *      @dev: device to register
8545  *
8546  *      Take a completed network device structure and add it to the kernel
8547  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
8548  *      chain. 0 is returned on success. A negative errno code is returned
8549  *      on a failure to set up the device, or if the name is a duplicate.
8550  *
8551  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
8552  *      and expands the device name if you passed a format string to
8553  *      alloc_netdev.
8554  */
8555 int register_netdev(struct net_device *dev)
8556 {
8557         int err;
8558
8559         if (rtnl_lock_killable())
8560                 return -EINTR;
8561         err = register_netdevice(dev);
8562         rtnl_unlock();
8563         return err;
8564 }
8565 EXPORT_SYMBOL(register_netdev);
8566
8567 int netdev_refcnt_read(const struct net_device *dev)
8568 {
8569         int i, refcnt = 0;
8570
8571         for_each_possible_cpu(i)
8572                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
8573         return refcnt;
8574 }
8575 EXPORT_SYMBOL(netdev_refcnt_read);
8576
8577 /**
8578  * netdev_wait_allrefs - wait until all references are gone.
8579  * @dev: target net_device
8580  *
8581  * This is called when unregistering network devices.
8582  *
8583  * Any protocol or device that holds a reference should register
8584  * for netdevice notification, and cleanup and put back the
8585  * reference if they receive an UNREGISTER event.
8586  * We can get stuck here if buggy protocols don't correctly
8587  * call dev_put.
8588  */
8589 static void netdev_wait_allrefs(struct net_device *dev)
8590 {
8591         unsigned long rebroadcast_time, warning_time;
8592         int refcnt;
8593
8594         linkwatch_forget_dev(dev);
8595
8596         rebroadcast_time = warning_time = jiffies;
8597         refcnt = netdev_refcnt_read(dev);
8598
8599         while (refcnt != 0) {
8600                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
8601                         rtnl_lock();
8602
8603                         /* Rebroadcast unregister notification */
8604                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
8605
8606                         __rtnl_unlock();
8607                         rcu_barrier();
8608                         rtnl_lock();
8609
8610                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
8611                                      &dev->state)) {
8612                                 /* We must not have linkwatch events
8613                                  * pending on unregister. If this
8614                                  * happens, we simply run the queue
8615                                  * unscheduled, resulting in a noop
8616                                  * for this device.
8617                                  */
8618                                 linkwatch_run_queue();
8619                         }
8620
8621                         __rtnl_unlock();
8622
8623                         rebroadcast_time = jiffies;
8624                 }
8625
8626                 msleep(250);
8627
8628                 refcnt = netdev_refcnt_read(dev);
8629
8630                 if (time_after(jiffies, warning_time + 10 * HZ)) {
8631                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
8632                                  dev->name, refcnt);
8633                         warning_time = jiffies;
8634                 }
8635         }
8636 }
8637
8638 /* The sequence is:
8639  *
8640  *      rtnl_lock();
8641  *      ...
8642  *      register_netdevice(x1);
8643  *      register_netdevice(x2);
8644  *      ...
8645  *      unregister_netdevice(y1);
8646  *      unregister_netdevice(y2);
8647  *      ...
8648  *      rtnl_unlock();
8649  *      free_netdev(y1);
8650  *      free_netdev(y2);
8651  *
8652  * We are invoked by rtnl_unlock().
8653  * This allows us to deal with problems:
8654  * 1) We can delete sysfs objects which invoke hotplug
8655  *    without deadlocking with linkwatch via keventd.
8656  * 2) Since we run with the RTNL semaphore not held, we can sleep
8657  *    safely in order to wait for the netdev refcnt to drop to zero.
8658  *
8659  * We must not return until all unregister events added during
8660  * the interval the lock was held have been completed.
8661  */
8662 void netdev_run_todo(void)
8663 {
8664         struct list_head list;
8665
8666         /* Snapshot list, allow later requests */
8667         list_replace_init(&net_todo_list, &list);
8668
8669         __rtnl_unlock();
8670
8671
8672         /* Wait for rcu callbacks to finish before next phase */
8673         if (!list_empty(&list))
8674                 rcu_barrier();
8675
8676         while (!list_empty(&list)) {
8677                 struct net_device *dev
8678                         = list_first_entry(&list, struct net_device, todo_list);
8679                 list_del(&dev->todo_list);
8680
8681                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
8682                         pr_err("network todo '%s' but state %d\n",
8683                                dev->name, dev->reg_state);
8684                         dump_stack();
8685                         continue;
8686                 }
8687
8688                 dev->reg_state = NETREG_UNREGISTERED;
8689
8690                 netdev_wait_allrefs(dev);
8691
8692                 /* paranoia */
8693                 BUG_ON(netdev_refcnt_read(dev));
8694                 BUG_ON(!list_empty(&dev->ptype_all));
8695                 BUG_ON(!list_empty(&dev->ptype_specific));
8696                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
8697                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
8698 #if IS_ENABLED(CONFIG_DECNET)
8699                 WARN_ON(dev->dn_ptr);
8700 #endif
8701                 if (dev->priv_destructor)
8702                         dev->priv_destructor(dev);
8703                 if (dev->needs_free_netdev)
8704                         free_netdev(dev);
8705
8706                 /* Report a network device has been unregistered */
8707                 rtnl_lock();
8708                 dev_net(dev)->dev_unreg_count--;
8709                 __rtnl_unlock();
8710                 wake_up(&netdev_unregistering_wq);
8711
8712                 /* Free network device */
8713                 kobject_put(&dev->dev.kobj);
8714         }
8715 }
8716
8717 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
8718  * all the same fields in the same order as net_device_stats, with only
8719  * the type differing, but rtnl_link_stats64 may have additional fields
8720  * at the end for newer counters.
8721  */
8722 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
8723                              const struct net_device_stats *netdev_stats)
8724 {
8725 #if BITS_PER_LONG == 64
8726         BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
8727         memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
8728         /* zero out counters that only exist in rtnl_link_stats64 */
8729         memset((char *)stats64 + sizeof(*netdev_stats), 0,
8730                sizeof(*stats64) - sizeof(*netdev_stats));
8731 #else
8732         size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
8733         const unsigned long *src = (const unsigned long *)netdev_stats;
8734         u64 *dst = (u64 *)stats64;
8735
8736         BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
8737         for (i = 0; i < n; i++)
8738                 dst[i] = src[i];
8739         /* zero out counters that only exist in rtnl_link_stats64 */
8740         memset((char *)stats64 + n * sizeof(u64), 0,
8741                sizeof(*stats64) - n * sizeof(u64));
8742 #endif
8743 }
8744 EXPORT_SYMBOL(netdev_stats_to_stats64);
8745
8746 /**
8747  *      dev_get_stats   - get network device statistics
8748  *      @dev: device to get statistics from
8749  *      @storage: place to store stats
8750  *
8751  *      Get network statistics from device. Return @storage.
8752  *      The device driver may provide its own method by setting
8753  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
8754  *      otherwise the internal statistics structure is used.
8755  */
8756 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
8757                                         struct rtnl_link_stats64 *storage)
8758 {
8759         const struct net_device_ops *ops = dev->netdev_ops;
8760
8761         if (ops->ndo_get_stats64) {
8762                 memset(storage, 0, sizeof(*storage));
8763                 ops->ndo_get_stats64(dev, storage);
8764         } else if (ops->ndo_get_stats) {
8765                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
8766         } else {
8767                 netdev_stats_to_stats64(storage, &dev->stats);
8768         }
8769         storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
8770         storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
8771         storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
8772         return storage;
8773 }
8774 EXPORT_SYMBOL(dev_get_stats);
8775
8776 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
8777 {
8778         struct netdev_queue *queue = dev_ingress_queue(dev);
8779
8780 #ifdef CONFIG_NET_CLS_ACT
8781         if (queue)
8782                 return queue;
8783         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
8784         if (!queue)
8785                 return NULL;
8786         netdev_init_one_queue(dev, queue, NULL);
8787         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
8788         queue->qdisc_sleeping = &noop_qdisc;
8789         rcu_assign_pointer(dev->ingress_queue, queue);
8790 #endif
8791         return queue;
8792 }
8793
8794 static const struct ethtool_ops default_ethtool_ops;
8795
8796 void netdev_set_default_ethtool_ops(struct net_device *dev,
8797                                     const struct ethtool_ops *ops)
8798 {
8799         if (dev->ethtool_ops == &default_ethtool_ops)
8800                 dev->ethtool_ops = ops;
8801 }
8802 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
8803
8804 void netdev_freemem(struct net_device *dev)
8805 {
8806         char *addr = (char *)dev - dev->padded;
8807
8808         kvfree(addr);
8809 }
8810
8811 /**
8812  * alloc_netdev_mqs - allocate network device
8813  * @sizeof_priv: size of private data to allocate space for
8814  * @name: device name format string
8815  * @name_assign_type: origin of device name
8816  * @setup: callback to initialize device
8817  * @txqs: the number of TX subqueues to allocate
8818  * @rxqs: the number of RX subqueues to allocate
8819  *
8820  * Allocates a struct net_device with private data area for driver use
8821  * and performs basic initialization.  Also allocates subqueue structs
8822  * for each queue on the device.
8823  */
8824 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
8825                 unsigned char name_assign_type,
8826                 void (*setup)(struct net_device *),
8827                 unsigned int txqs, unsigned int rxqs)
8828 {
8829         struct net_device *dev;
8830         unsigned int alloc_size;
8831         struct net_device *p;
8832
8833         BUG_ON(strlen(name) >= sizeof(dev->name));
8834
8835         if (txqs < 1) {
8836                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
8837                 return NULL;
8838         }
8839
8840         if (rxqs < 1) {
8841                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
8842                 return NULL;
8843         }
8844
8845         alloc_size = sizeof(struct net_device);
8846         if (sizeof_priv) {
8847                 /* ensure 32-byte alignment of private area */
8848                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
8849                 alloc_size += sizeof_priv;
8850         }
8851         /* ensure 32-byte alignment of whole construct */
8852         alloc_size += NETDEV_ALIGN - 1;
8853
8854         p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
8855         if (!p)
8856                 return NULL;
8857
8858         dev = PTR_ALIGN(p, NETDEV_ALIGN);
8859         dev->padded = (char *)dev - (char *)p;
8860
8861         dev->pcpu_refcnt = alloc_percpu(int);
8862         if (!dev->pcpu_refcnt)
8863                 goto free_dev;
8864
8865         if (dev_addr_init(dev))
8866                 goto free_pcpu;
8867
8868         dev_mc_init(dev);
8869         dev_uc_init(dev);
8870
8871         dev_net_set(dev, &init_net);
8872
8873         dev->gso_max_size = GSO_MAX_SIZE;
8874         dev->gso_max_segs = GSO_MAX_SEGS;
8875
8876         INIT_LIST_HEAD(&dev->napi_list);
8877         INIT_LIST_HEAD(&dev->unreg_list);
8878         INIT_LIST_HEAD(&dev->close_list);
8879         INIT_LIST_HEAD(&dev->link_watch_list);
8880         INIT_LIST_HEAD(&dev->adj_list.upper);
8881         INIT_LIST_HEAD(&dev->adj_list.lower);
8882         INIT_LIST_HEAD(&dev->ptype_all);
8883         INIT_LIST_HEAD(&dev->ptype_specific);
8884 #ifdef CONFIG_NET_SCHED
8885         hash_init(dev->qdisc_hash);
8886 #endif
8887         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8888         setup(dev);
8889
8890         if (!dev->tx_queue_len) {
8891                 dev->priv_flags |= IFF_NO_QUEUE;
8892                 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
8893         }
8894
8895         dev->num_tx_queues = txqs;
8896         dev->real_num_tx_queues = txqs;
8897         if (netif_alloc_netdev_queues(dev))
8898                 goto free_all;
8899
8900         dev->num_rx_queues = rxqs;
8901         dev->real_num_rx_queues = rxqs;
8902         if (netif_alloc_rx_queues(dev))
8903                 goto free_all;
8904
8905         strcpy(dev->name, name);
8906         dev->name_assign_type = name_assign_type;
8907         dev->group = INIT_NETDEV_GROUP;
8908         if (!dev->ethtool_ops)
8909                 dev->ethtool_ops = &default_ethtool_ops;
8910
8911         nf_hook_ingress_init(dev);
8912
8913         return dev;
8914
8915 free_all:
8916         free_netdev(dev);
8917         return NULL;
8918
8919 free_pcpu:
8920         free_percpu(dev->pcpu_refcnt);
8921 free_dev:
8922         netdev_freemem(dev);
8923         return NULL;
8924 }
8925 EXPORT_SYMBOL(alloc_netdev_mqs);
8926
8927 /**
8928  * free_netdev - free network device
8929  * @dev: device
8930  *
8931  * This function does the last stage of destroying an allocated device
8932  * interface. The reference to the device object is released. If this
8933  * is the last reference then it will be freed.Must be called in process
8934  * context.
8935  */
8936 void free_netdev(struct net_device *dev)
8937 {
8938         struct napi_struct *p, *n;
8939
8940         might_sleep();
8941         netif_free_tx_queues(dev);
8942         netif_free_rx_queues(dev);
8943
8944         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
8945
8946         /* Flush device addresses */
8947         dev_addr_flush(dev);
8948
8949         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
8950                 netif_napi_del(p);
8951
8952         free_percpu(dev->pcpu_refcnt);
8953         dev->pcpu_refcnt = NULL;
8954
8955         /*  Compatibility with error handling in drivers */
8956         if (dev->reg_state == NETREG_UNINITIALIZED) {
8957                 netdev_freemem(dev);
8958                 return;
8959         }
8960
8961         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
8962         dev->reg_state = NETREG_RELEASED;
8963
8964         /* will free via device release */
8965         put_device(&dev->dev);
8966 }
8967 EXPORT_SYMBOL(free_netdev);
8968
8969 /**
8970  *      synchronize_net -  Synchronize with packet receive processing
8971  *
8972  *      Wait for packets currently being received to be done.
8973  *      Does not block later packets from starting.
8974  */
8975 void synchronize_net(void)
8976 {
8977         might_sleep();
8978         if (rtnl_is_locked())
8979                 synchronize_rcu_expedited();
8980         else
8981                 synchronize_rcu();
8982 }
8983 EXPORT_SYMBOL(synchronize_net);
8984
8985 /**
8986  *      unregister_netdevice_queue - remove device from the kernel
8987  *      @dev: device
8988  *      @head: list
8989  *
8990  *      This function shuts down a device interface and removes it
8991  *      from the kernel tables.
8992  *      If head not NULL, device is queued to be unregistered later.
8993  *
8994  *      Callers must hold the rtnl semaphore.  You may want
8995  *      unregister_netdev() instead of this.
8996  */
8997
8998 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
8999 {
9000         ASSERT_RTNL();
9001
9002         if (head) {
9003                 list_move_tail(&dev->unreg_list, head);
9004         } else {
9005                 rollback_registered(dev);
9006                 /* Finish processing unregister after unlock */
9007                 net_set_todo(dev);
9008         }
9009 }
9010 EXPORT_SYMBOL(unregister_netdevice_queue);
9011
9012 /**
9013  *      unregister_netdevice_many - unregister many devices
9014  *      @head: list of devices
9015  *
9016  *  Note: As most callers use a stack allocated list_head,
9017  *  we force a list_del() to make sure stack wont be corrupted later.
9018  */
9019 void unregister_netdevice_many(struct list_head *head)
9020 {
9021         struct net_device *dev;
9022
9023         if (!list_empty(head)) {
9024                 rollback_registered_many(head);
9025                 list_for_each_entry(dev, head, unreg_list)
9026                         net_set_todo(dev);
9027                 list_del(head);
9028         }
9029 }
9030 EXPORT_SYMBOL(unregister_netdevice_many);
9031
9032 /**
9033  *      unregister_netdev - remove device from the kernel
9034  *      @dev: device
9035  *
9036  *      This function shuts down a device interface and removes it
9037  *      from the kernel tables.
9038  *
9039  *      This is just a wrapper for unregister_netdevice that takes
9040  *      the rtnl semaphore.  In general you want to use this and not
9041  *      unregister_netdevice.
9042  */
9043 void unregister_netdev(struct net_device *dev)
9044 {
9045         rtnl_lock();
9046         unregister_netdevice(dev);
9047         rtnl_unlock();
9048 }
9049 EXPORT_SYMBOL(unregister_netdev);
9050
9051 /**
9052  *      dev_change_net_namespace - move device to different nethost namespace
9053  *      @dev: device
9054  *      @net: network namespace
9055  *      @pat: If not NULL name pattern to try if the current device name
9056  *            is already taken in the destination network namespace.
9057  *
9058  *      This function shuts down a device interface and moves it
9059  *      to a new network namespace. On success 0 is returned, on
9060  *      a failure a netagive errno code is returned.
9061  *
9062  *      Callers must hold the rtnl semaphore.
9063  */
9064
9065 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
9066 {
9067         int err, new_nsid, new_ifindex;
9068
9069         ASSERT_RTNL();
9070
9071         /* Don't allow namespace local devices to be moved. */
9072         err = -EINVAL;
9073         if (dev->features & NETIF_F_NETNS_LOCAL)
9074                 goto out;
9075
9076         /* Ensure the device has been registrered */
9077         if (dev->reg_state != NETREG_REGISTERED)
9078                 goto out;
9079
9080         /* Get out if there is nothing todo */
9081         err = 0;
9082         if (net_eq(dev_net(dev), net))
9083                 goto out;
9084
9085         /* Pick the destination device name, and ensure
9086          * we can use it in the destination network namespace.
9087          */
9088         err = -EEXIST;
9089         if (__dev_get_by_name(net, dev->name)) {
9090                 /* We get here if we can't use the current device name */
9091                 if (!pat)
9092                         goto out;
9093                 err = dev_get_valid_name(net, dev, pat);
9094                 if (err < 0)
9095                         goto out;
9096         }
9097
9098         /*
9099          * And now a mini version of register_netdevice unregister_netdevice.
9100          */
9101
9102         /* If device is running close it first. */
9103         dev_close(dev);
9104
9105         /* And unlink it from device chain */
9106         unlist_netdevice(dev);
9107
9108         synchronize_net();
9109
9110         /* Shutdown queueing discipline. */
9111         dev_shutdown(dev);
9112
9113         /* Notify protocols, that we are about to destroy
9114          * this device. They should clean all the things.
9115          *
9116          * Note that dev->reg_state stays at NETREG_REGISTERED.
9117          * This is wanted because this way 8021q and macvlan know
9118          * the device is just moving and can keep their slaves up.
9119          */
9120         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
9121         rcu_barrier();
9122
9123         new_nsid = peernet2id_alloc(dev_net(dev), net);
9124         /* If there is an ifindex conflict assign a new one */
9125         if (__dev_get_by_index(net, dev->ifindex))
9126                 new_ifindex = dev_new_index(net);
9127         else
9128                 new_ifindex = dev->ifindex;
9129
9130         rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
9131                             new_ifindex);
9132
9133         /*
9134          *      Flush the unicast and multicast chains
9135          */
9136         dev_uc_flush(dev);
9137         dev_mc_flush(dev);
9138
9139         /* Send a netdev-removed uevent to the old namespace */
9140         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
9141         netdev_adjacent_del_links(dev);
9142
9143         /* Actually switch the network namespace */
9144         dev_net_set(dev, net);
9145         dev->ifindex = new_ifindex;
9146
9147         /* Send a netdev-add uevent to the new namespace */
9148         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
9149         netdev_adjacent_add_links(dev);
9150
9151         /* Fixup kobjects */
9152         err = device_rename(&dev->dev, dev->name);
9153         WARN_ON(err);
9154
9155         /* Add the device back in the hashes */
9156         list_netdevice(dev);
9157
9158         /* Notify protocols, that a new device appeared. */
9159         call_netdevice_notifiers(NETDEV_REGISTER, dev);
9160
9161         /*
9162          *      Prevent userspace races by waiting until the network
9163          *      device is fully setup before sending notifications.
9164          */
9165         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
9166
9167         synchronize_net();
9168         err = 0;
9169 out:
9170         return err;
9171 }
9172 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
9173
9174 static int dev_cpu_dead(unsigned int oldcpu)
9175 {
9176         struct sk_buff **list_skb;
9177         struct sk_buff *skb;
9178         unsigned int cpu;
9179         struct softnet_data *sd, *oldsd, *remsd = NULL;
9180
9181         local_irq_disable();
9182         cpu = smp_processor_id();
9183         sd = &per_cpu(softnet_data, cpu);
9184         oldsd = &per_cpu(softnet_data, oldcpu);
9185
9186         /* Find end of our completion_queue. */
9187         list_skb = &sd->completion_queue;
9188         while (*list_skb)
9189                 list_skb = &(*list_skb)->next;
9190         /* Append completion queue from offline CPU. */
9191         *list_skb = oldsd->completion_queue;
9192         oldsd->completion_queue = NULL;
9193
9194         /* Append output queue from offline CPU. */
9195         if (oldsd->output_queue) {
9196                 *sd->output_queue_tailp = oldsd->output_queue;
9197                 sd->output_queue_tailp = oldsd->output_queue_tailp;
9198                 oldsd->output_queue = NULL;
9199                 oldsd->output_queue_tailp = &oldsd->output_queue;
9200         }
9201         /* Append NAPI poll list from offline CPU, with one exception :
9202          * process_backlog() must be called by cpu owning percpu backlog.
9203          * We properly handle process_queue & input_pkt_queue later.
9204          */
9205         while (!list_empty(&oldsd->poll_list)) {
9206                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
9207                                                             struct napi_struct,
9208                                                             poll_list);
9209
9210                 list_del_init(&napi->poll_list);
9211                 if (napi->poll == process_backlog)
9212                         napi->state = 0;
9213                 else
9214                         ____napi_schedule(sd, napi);
9215         }
9216
9217         raise_softirq_irqoff(NET_TX_SOFTIRQ);
9218         local_irq_enable();
9219
9220 #ifdef CONFIG_RPS
9221         remsd = oldsd->rps_ipi_list;
9222         oldsd->rps_ipi_list = NULL;
9223 #endif
9224         /* send out pending IPI's on offline CPU */
9225         net_rps_send_ipi(remsd);
9226
9227         /* Process offline CPU's input_pkt_queue */
9228         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
9229                 netif_rx_ni(skb);
9230                 input_queue_head_incr(oldsd);
9231         }
9232         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
9233                 netif_rx_ni(skb);
9234                 input_queue_head_incr(oldsd);
9235         }
9236
9237         return 0;
9238 }
9239
9240 /**
9241  *      netdev_increment_features - increment feature set by one
9242  *      @all: current feature set
9243  *      @one: new feature set
9244  *      @mask: mask feature set
9245  *
9246  *      Computes a new feature set after adding a device with feature set
9247  *      @one to the master device with current feature set @all.  Will not
9248  *      enable anything that is off in @mask. Returns the new feature set.
9249  */
9250 netdev_features_t netdev_increment_features(netdev_features_t all,
9251         netdev_features_t one, netdev_features_t mask)
9252 {
9253         if (mask & NETIF_F_HW_CSUM)
9254                 mask |= NETIF_F_CSUM_MASK;
9255         mask |= NETIF_F_VLAN_CHALLENGED;
9256
9257         all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
9258         all &= one | ~NETIF_F_ALL_FOR_ALL;
9259
9260         /* If one device supports hw checksumming, set for all. */
9261         if (all & NETIF_F_HW_CSUM)
9262                 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
9263
9264         return all;
9265 }
9266 EXPORT_SYMBOL(netdev_increment_features);
9267
9268 static struct hlist_head * __net_init netdev_create_hash(void)
9269 {
9270         int i;
9271         struct hlist_head *hash;
9272
9273         hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
9274         if (hash != NULL)
9275                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
9276                         INIT_HLIST_HEAD(&hash[i]);
9277
9278         return hash;
9279 }
9280
9281 /* Initialize per network namespace state */
9282 static int __net_init netdev_init(struct net *net)
9283 {
9284         BUILD_BUG_ON(GRO_HASH_BUCKETS >
9285                      8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask));
9286
9287         if (net != &init_net)
9288                 INIT_LIST_HEAD(&net->dev_base_head);
9289
9290         net->dev_name_head = netdev_create_hash();
9291         if (net->dev_name_head == NULL)
9292                 goto err_name;
9293
9294         net->dev_index_head = netdev_create_hash();
9295         if (net->dev_index_head == NULL)
9296                 goto err_idx;
9297
9298         return 0;
9299
9300 err_idx:
9301         kfree(net->dev_name_head);
9302 err_name:
9303         return -ENOMEM;
9304 }
9305
9306 /**
9307  *      netdev_drivername - network driver for the device
9308  *      @dev: network device
9309  *
9310  *      Determine network driver for device.
9311  */
9312 const char *netdev_drivername(const struct net_device *dev)
9313 {
9314         const struct device_driver *driver;
9315         const struct device *parent;
9316         const char *empty = "";
9317
9318         parent = dev->dev.parent;
9319         if (!parent)
9320                 return empty;
9321
9322         driver = parent->driver;
9323         if (driver && driver->name)
9324                 return driver->name;
9325         return empty;
9326 }
9327
9328 static void __netdev_printk(const char *level, const struct net_device *dev,
9329                             struct va_format *vaf)
9330 {
9331         if (dev && dev->dev.parent) {
9332                 dev_printk_emit(level[1] - '0',
9333                                 dev->dev.parent,
9334                                 "%s %s %s%s: %pV",
9335                                 dev_driver_string(dev->dev.parent),
9336                                 dev_name(dev->dev.parent),
9337                                 netdev_name(dev), netdev_reg_state(dev),
9338                                 vaf);
9339         } else if (dev) {
9340                 printk("%s%s%s: %pV",
9341                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
9342         } else {
9343                 printk("%s(NULL net_device): %pV", level, vaf);
9344         }
9345 }
9346
9347 void netdev_printk(const char *level, const struct net_device *dev,
9348                    const char *format, ...)
9349 {
9350         struct va_format vaf;
9351         va_list args;
9352
9353         va_start(args, format);
9354
9355         vaf.fmt = format;
9356         vaf.va = &args;
9357
9358         __netdev_printk(level, dev, &vaf);
9359
9360         va_end(args);
9361 }
9362 EXPORT_SYMBOL(netdev_printk);
9363
9364 #define define_netdev_printk_level(func, level)                 \
9365 void func(const struct net_device *dev, const char *fmt, ...)   \
9366 {                                                               \
9367         struct va_format vaf;                                   \
9368         va_list args;                                           \
9369                                                                 \
9370         va_start(args, fmt);                                    \
9371                                                                 \
9372         vaf.fmt = fmt;                                          \
9373         vaf.va = &args;                                         \
9374                                                                 \
9375         __netdev_printk(level, dev, &vaf);                      \
9376                                                                 \
9377         va_end(args);                                           \
9378 }                                                               \
9379 EXPORT_SYMBOL(func);
9380
9381 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
9382 define_netdev_printk_level(netdev_alert, KERN_ALERT);
9383 define_netdev_printk_level(netdev_crit, KERN_CRIT);
9384 define_netdev_printk_level(netdev_err, KERN_ERR);
9385 define_netdev_printk_level(netdev_warn, KERN_WARNING);
9386 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
9387 define_netdev_printk_level(netdev_info, KERN_INFO);
9388
9389 static void __net_exit netdev_exit(struct net *net)
9390 {
9391         kfree(net->dev_name_head);
9392         kfree(net->dev_index_head);
9393         if (net != &init_net)
9394                 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
9395 }
9396
9397 static struct pernet_operations __net_initdata netdev_net_ops = {
9398         .init = netdev_init,
9399         .exit = netdev_exit,
9400 };
9401
9402 static void __net_exit default_device_exit(struct net *net)
9403 {
9404         struct net_device *dev, *aux;
9405         /*
9406          * Push all migratable network devices back to the
9407          * initial network namespace
9408          */
9409         rtnl_lock();
9410         for_each_netdev_safe(net, dev, aux) {
9411                 int err;
9412                 char fb_name[IFNAMSIZ];
9413
9414                 /* Ignore unmoveable devices (i.e. loopback) */
9415                 if (dev->features & NETIF_F_NETNS_LOCAL)
9416                         continue;
9417
9418                 /* Leave virtual devices for the generic cleanup */
9419                 if (dev->rtnl_link_ops)
9420                         continue;
9421
9422                 /* Push remaining network devices to init_net */
9423                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
9424                 err = dev_change_net_namespace(dev, &init_net, fb_name);
9425                 if (err) {
9426                         pr_emerg("%s: failed to move %s to init_net: %d\n",
9427                                  __func__, dev->name, err);
9428                         BUG();
9429                 }
9430         }
9431         rtnl_unlock();
9432 }
9433
9434 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
9435 {
9436         /* Return with the rtnl_lock held when there are no network
9437          * devices unregistering in any network namespace in net_list.
9438          */
9439         struct net *net;
9440         bool unregistering;
9441         DEFINE_WAIT_FUNC(wait, woken_wake_function);
9442
9443         add_wait_queue(&netdev_unregistering_wq, &wait);
9444         for (;;) {
9445                 unregistering = false;
9446                 rtnl_lock();
9447                 list_for_each_entry(net, net_list, exit_list) {
9448                         if (net->dev_unreg_count > 0) {
9449                                 unregistering = true;
9450                                 break;
9451                         }
9452                 }
9453                 if (!unregistering)
9454                         break;
9455                 __rtnl_unlock();
9456
9457                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
9458         }
9459         remove_wait_queue(&netdev_unregistering_wq, &wait);
9460 }
9461
9462 static void __net_exit default_device_exit_batch(struct list_head *net_list)
9463 {
9464         /* At exit all network devices most be removed from a network
9465          * namespace.  Do this in the reverse order of registration.
9466          * Do this across as many network namespaces as possible to
9467          * improve batching efficiency.
9468          */
9469         struct net_device *dev;
9470         struct net *net;
9471         LIST_HEAD(dev_kill_list);
9472
9473         /* To prevent network device cleanup code from dereferencing
9474          * loopback devices or network devices that have been freed
9475          * wait here for all pending unregistrations to complete,
9476          * before unregistring the loopback device and allowing the
9477          * network namespace be freed.
9478          *
9479          * The netdev todo list containing all network devices
9480          * unregistrations that happen in default_device_exit_batch
9481          * will run in the rtnl_unlock() at the end of
9482          * default_device_exit_batch.
9483          */
9484         rtnl_lock_unregistering(net_list);
9485         list_for_each_entry(net, net_list, exit_list) {
9486                 for_each_netdev_reverse(net, dev) {
9487                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
9488                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
9489                         else
9490                                 unregister_netdevice_queue(dev, &dev_kill_list);
9491                 }
9492         }
9493         unregister_netdevice_many(&dev_kill_list);
9494         rtnl_unlock();
9495 }
9496
9497 static struct pernet_operations __net_initdata default_device_ops = {
9498         .exit = default_device_exit,
9499         .exit_batch = default_device_exit_batch,
9500 };
9501
9502 /*
9503  *      Initialize the DEV module. At boot time this walks the device list and
9504  *      unhooks any devices that fail to initialise (normally hardware not
9505  *      present) and leaves us with a valid list of present and active devices.
9506  *
9507  */
9508
9509 /*
9510  *       This is called single threaded during boot, so no need
9511  *       to take the rtnl semaphore.
9512  */
9513 static int __init net_dev_init(void)
9514 {
9515         int i, rc = -ENOMEM;
9516
9517         BUG_ON(!dev_boot_phase);
9518
9519         if (dev_proc_init())
9520                 goto out;
9521
9522         if (netdev_kobject_init())
9523                 goto out;
9524
9525         INIT_LIST_HEAD(&ptype_all);
9526         for (i = 0; i < PTYPE_HASH_SIZE; i++)
9527                 INIT_LIST_HEAD(&ptype_base[i]);
9528
9529         INIT_LIST_HEAD(&offload_base);
9530
9531         if (register_pernet_subsys(&netdev_net_ops))
9532                 goto out;
9533
9534         /*
9535          *      Initialise the packet receive queues.
9536          */
9537
9538         for_each_possible_cpu(i) {
9539                 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
9540                 struct softnet_data *sd = &per_cpu(softnet_data, i);
9541
9542                 INIT_WORK(flush, flush_backlog);
9543
9544                 skb_queue_head_init(&sd->input_pkt_queue);
9545                 skb_queue_head_init(&sd->process_queue);
9546 #ifdef CONFIG_XFRM_OFFLOAD
9547                 skb_queue_head_init(&sd->xfrm_backlog);
9548 #endif
9549                 INIT_LIST_HEAD(&sd->poll_list);
9550                 sd->output_queue_tailp = &sd->output_queue;
9551 #ifdef CONFIG_RPS
9552                 sd->csd.func = rps_trigger_softirq;
9553                 sd->csd.info = sd;
9554                 sd->cpu = i;
9555 #endif
9556
9557                 sd->backlog.poll = process_backlog;
9558                 sd->backlog.weight = weight_p;
9559         }
9560
9561         dev_boot_phase = 0;
9562
9563         /* The loopback device is special if any other network devices
9564          * is present in a network namespace the loopback device must
9565          * be present. Since we now dynamically allocate and free the
9566          * loopback device ensure this invariant is maintained by
9567          * keeping the loopback device as the first device on the
9568          * list of network devices.  Ensuring the loopback devices
9569          * is the first device that appears and the last network device
9570          * that disappears.
9571          */
9572         if (register_pernet_device(&loopback_net_ops))
9573                 goto out;
9574
9575         if (register_pernet_device(&default_device_ops))
9576                 goto out;
9577
9578         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
9579         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
9580
9581         rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
9582                                        NULL, dev_cpu_dead);
9583         WARN_ON(rc < 0);
9584         rc = 0;
9585 out:
9586         return rc;
9587 }
9588
9589 subsys_initcall(net_dev_init);