// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PACKET - implements raw packet sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() now used correctly
 *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 *              Alan Cox        :       tidied skbuff lists.
 *              Alan Cox        :       Now uses generic datagram routines I
 *                                      added. Also fixed the peek/read crash
 *                                      from all old Linux datagram code.
 *              Alan Cox        :       Uses the improved datagram code.
 *              Alan Cox        :       Added NULL's for socket options.
 *              Alan Cox        :       Re-commented the code.
 *              Alan Cox        :       Use new kernel side addressing
 *              Rob Janssen     :       Correct MTU usage.
 *              Dave Platt      :       Counter leaks caused by incorrect
 *                                      interrupt locking and some slightly
 *                                      dubious gcc output. Can you read
 *                                      compiler: it said _VOLATILE_
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       New buffers. Use sk->mac.raw.
 *              Alan Cox        :       sendmsg/recvmsg support.
 *              Alan Cox        :       Protocol setting support
 *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 *      Cyrus Durgin            :       Fixed kerneld for kmod.
 *      Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 *                                      The convention is that longer addresses
 *                                      will simply extend the hardware address
 *                                      byte arrays at the end of sockaddr_ll
 *                                      and packet_mreq.
 *              Johann Baudy    :       Added TX RING.
 *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 *                                      layer.
 *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - a packet socket receives packets with the ll header already pulled,
     so for SOCK_RAW the kernel must push it back before delivery.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
  If dev_has_header(dev) == false we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set the network_header to the correct position on output,
   since the packet classifier depends on it.
 */
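
/* For illustration only (user-space, not part of this file): a minimal
 * sketch of the visibility rules above, assuming an Ethernet device:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// read() returns frames starting at the Ethernet header, and
 *	// the application must build that header itself on send().
 *
 *	int fd2 = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 *	// read() returns the payload with the ll header removed; on
 *	// sendto() the kernel constructs the ll header from the
 *	// sockaddr_ll destination address.
 */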

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
        int             mr_ifindex;
        unsigned short  mr_type;
        unsigned short  mr_alen;
        unsigned char   mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
        struct tpacket_hdr  *h1;
        struct tpacket2_hdr *h2;
        struct tpacket3_hdr *h3;
        void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring);

#define V3_ALIGNMENT    (8)

#define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
                        struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
                struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
                struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
        union {
                struct sockaddr_pkt pkt;
                union {
                        /* Trick: alias skb original length with
                         * ll.sll_family and ll.protocol in order
                         * to save room.
                         */
                        unsigned int origlen;
                        struct sockaddr_ll ll;
                };
        } sa;
};
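
/* Layout note on the aliasing trick above: sockaddr_ll starts with two
 * unsigned shorts (sll_family, sll_protocol), which occupy the same four
 * bytes as origlen. The receive path stores the original skb length there,
 * and recvmsg() reads it back before those two fields are finally filled
 * in for the caller.
 */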

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
        ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
        ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{
        struct sk_buff *next, *head = NULL, *tail;
        int rc;

        rcu_read_lock();
        for (; skb != NULL; skb = next) {
                next = skb->next;
                skb_mark_not_on_list(skb);

                if (!nf_hook_egress(skb, &rc, skb->dev))
                        continue;

                if (!head)
                        head = skb;
                else
                        tail->next = skb;

                tail = skb;
        }
        rcu_read_unlock();

        return head;
}
#endif

static int packet_direct_xmit(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER_EGRESS
        if (nf_hook_egress_active()) {
                skb = nf_hook_direct_egress(skb);
                if (!skb)
                        return NET_XMIT_DROP;
        }
#endif
        return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(po->cached_dev);
        dev_hold(dev);
        rcu_read_unlock();

        return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
                                     struct net_device *dev)
{
        rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
        RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
        return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        int cpu = raw_smp_processor_id();
        u16 queue_index;

#ifdef CONFIG_XPS
        skb->sender_cpu = cpu + 1;
#endif
        skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
        if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb, NULL);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
                queue_index = netdev_pick_tx(dev, skb, NULL);
        }

        return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
        struct packet_sock *po = pkt_sk(sk);

        if (!po->running) {
                if (po->fanout)
                        __fanout_link(sk, po);
                else
                        dev_add_pack(&po->prot_hook);

                sock_hold(sk);
                po->running = 1;
        }
}

static void register_prot_hook(struct sock *sk)
{
        lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
        __register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        lockdep_assert_held_once(&po->bind_lock);

        po->running = 0;

        if (po->fanout)
                __fanout_unlink(sk, po);
        else
                __dev_remove_pack(&po->prot_hook);

        __sock_put(sk);

        if (sync) {
                spin_unlock(&po->bind_lock);
                synchronize_net();
                spin_lock(&po->bind_lock);
        }
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        if (po->running)
                __unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
        union tpacket_uhdr h;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                break;
        case TPACKET_V2:
                h.h2->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        case TPACKET_V3:
                h.h3->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
        union tpacket_uhdr h;

        smp_rmb();

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                return h.h1->tp_status;
        case TPACKET_V2:
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        case TPACKET_V3:
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                return h.h3->tp_status;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return 0;
        }
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
                                   unsigned int flags)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

        if (shhwtstamps &&
            (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
            ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
                return TP_STATUS_TS_RAW_HARDWARE;

        if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
            ktime_to_timespec64_cond(skb_tstamp(skb), ts))
                return TP_STATUS_TS_SOFTWARE;

        return 0;
}
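
/* For illustration only (user-space, not part of this file): which of the
 * checks above can succeed is controlled via the PACKET_TIMESTAMP socket
 * option, e.g.:
 *
 *	int req = SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * A raw hardware timestamp, when requested and available, takes
 * precedence over a software one.
 */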

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
                                    struct sk_buff *skb)
{
        union tpacket_uhdr h;
        struct timespec64 ts;
        __u32 ts_status;

        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
                return 0;

        h.raw = frame;
        /*
         * versions 1 through 3 overflow the timestamps in y2106, since they
         * all store the seconds in a 32-bit unsigned integer.
         * If we create a version 4, that should have a 64-bit timestamp,
         * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
         * nanoseconds.
         */
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_sec = ts.tv_sec;
                h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
                break;
        case TPACKET_V2:
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
                break;
        case TPACKET_V3:
                h.h3->tp_sec = ts.tv_sec;
                h.h3->tp_nsec = ts.tv_nsec;
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        /* one flush is safe, as both fields always lie on the same cacheline */
        flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
        smp_wmb();

        return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
                                 const struct packet_ring_buffer *rb,
                                 unsigned int position,
                                 int status)
{
        unsigned int pg_vec_pos, frame_offset;
        union tpacket_uhdr h;

        pg_vec_pos = position / rb->frames_per_block;
        frame_offset = position % rb->frames_per_block;

        h.raw = rb->pg_vec[pg_vec_pos].buffer +
                (frame_offset * rb->frame_size);

        if (status != __packet_get_status(po, h.raw))
                return NULL;

        return h.raw;
}
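
/* Worked example of the lookup math above, assuming tp_frame_size = 2048
 * and tp_block_size = 4096 (frames_per_block = 2): frame position 5 is
 * found in pg_vec block 5 / 2 = 2, at byte offset (5 % 2) * 2048 = 2048.
 */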

static void *packet_current_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
                struct sk_buff_head *rb_queue)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
        spin_unlock_bh(&rb_queue->lock);

        prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
                    0);
        pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
                                int blk_size_in_bytes)
{
        struct net_device *dev;
        unsigned int mbits, div;
        struct ethtool_link_ksettings ecmd;
        int err;

        rtnl_lock();
        dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
        if (unlikely(!dev)) {
                rtnl_unlock();
                return DEFAULT_PRB_RETIRE_TOV;
        }
        err = __ethtool_get_link_ksettings(dev, &ecmd);
        rtnl_unlock();
        if (err)
                return DEFAULT_PRB_RETIRE_TOV;

        /* If the link speed is so slow that you don't really
         * need to worry about perf anyway
         */
        if (ecmd.base.speed < SPEED_1000 ||
            ecmd.base.speed == SPEED_UNKNOWN)
                return DEFAULT_PRB_RETIRE_TOV;

        div = ecmd.base.speed / 1000;
        mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

        if (div)
                mbits /= div;

        if (div)
                return mbits + 1;
        return mbits;
}
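
/* Worked example of the math above, assuming a 1 MiB block on a 1 Gbit/s
 * link: div = 1000 / 1000 = 1 and mbits = (2^20 * 8) / (1024 * 1024) = 8,
 * so the retire timeout is 8 / 1 + 1 = 9 ms, roughly how long line-rate
 * traffic needs to fill one block (see the timer notes below).
 */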

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
                        union tpacket_req_u *req_u)
{
        p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
                        struct packet_ring_buffer *rb,
                        struct pgv *pg_vec,
                        union tpacket_req_u *req_u)
{
        struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd;

        memset(p1, 0x0, sizeof(*p1));

        p1->knxt_seq_num = 1;
        p1->pkbdq = pg_vec;
        pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
        p1->pkblk_start = pg_vec[0].buffer;
        p1->kblk_size = req_u->req3.tp_block_size;
        p1->knum_blocks = req_u->req3.tp_block_nr;
        p1->hdrlen = po->tp_hdrlen;
        p1->version = po->tp_version;
        p1->last_kactive_blk_num = 0;
        po->stats.stats3.tp_freeze_q_cnt = 0;
        if (req_u->req3.tp_retire_blk_tov)
                p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
        else
                p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
                                                req_u->req3.tp_block_size);
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
        rwlock_init(&p1->blk_fill_in_prog_lock);

        p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po);
        prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        mod_timer(&pkc->retire_blk_timer,
                        jiffies + pkc->tov_in_jiffies);
        pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
        struct packet_sock *po =
                from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
        struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        unsigned int frozen;
        struct tpacket_block_desc *pbd;

        spin_lock(&po->sk.sk_receive_queue.lock);

        frozen = prb_queue_frozen(pkc);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        if (unlikely(pkc->delete_blk_timer))
                goto out;

        /* We only need to plug the race when the block is partially filled.
         * tpacket_rcv:
         *              lock(); increment BLOCK_NUM_PKTS; unlock()
         *              copy_bits() is in progress ...
         *              timer fires on other cpu:
         *              we can't retire the current block because copy_bits
         *              is in progress.
         *
         */
        if (BLOCK_NUM_PKTS(pbd)) {
                /* Waiting for skb_copy_bits to finish... */
                write_lock(&pkc->blk_fill_in_prog_lock);
                write_unlock(&pkc->blk_fill_in_prog_lock);
        }

        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
                        if (!BLOCK_NUM_PKTS(pbd)) {
                                /* An empty block. Just refresh the timer. */
                                goto refresh_timer;
                        }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
                        else
                                goto out;
                } else {
                        /* Case 1. Queue was frozen because user-space was
                         *         lagging behind.
                         */
                        if (prb_curr_blk_in_use(pbd)) {
                                /*
                                 * Ok, user-space is still behind.
                                 * So just refresh the timer.
                                 */
                                goto refresh_timer;
                        } else {
                               /* Case 2. The queue was frozen, user-space
                                * caught up, and now the link went idle and
                                * the timer fired. We don't have a block to
                                * close, so we open this block and restart
                                * the timer. Opening a block thaws the queue
                                * and restarts the timer; thawing/timer-refresh
                                * is a side effect.
                                */
                                prb_open_block(pkc, pbd);
                                goto out;
                        }
                }
        }

refresh_timer:
        _prb_refresh_rx_retire_blk_timer(pkc);

out:
        spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1, __u32 status)
{
        /* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        u8 *start, *end;

        start = (u8 *)pbd1;

        /* Skip the block header (we know the header WILL fit in 4K) */
        start += PAGE_SIZE;

        end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
        for (; start < end; start += PAGE_SIZE)
                flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif

        /* Now update the block status. */

        BLOCK_STATUS(pbd1) = status;

        /* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        start = (u8 *)pbd1;
        flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif
}

/*
 * Side effect:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *      Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1,
                struct packet_sock *po, unsigned int stat)
{
        __u32 status = TP_STATUS_USER | stat;

        struct tpacket3_hdr *last_pkt;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
        struct sock *sk = &po->sk;

        if (atomic_read(&po->tp_drops))
                status |= TP_STATUS_LOSING;

        last_pkt = (struct tpacket3_hdr *)pkc1->prev;
        last_pkt->tp_next_offset = 0;

        /* Get the ts of the last pkt */
        if (BLOCK_NUM_PKTS(pbd1)) {
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
                /* Ok, we tmo'd - so get the current time.
                 *
                 * It shouldn't really happen as we don't close empty
                 * blocks. See prb_retire_rx_blk_timer_expired().
                 */
                struct timespec64 ts;
                ktime_get_real_ts64(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
                h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
        }

        smp_wmb();

        /* Flush the block */
        prb_flush_block(pkc1, pbd1, status);

        sk->sk_data_ready(sk);

        pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
        pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
        struct tpacket_block_desc *pbd1)
{
        struct timespec64 ts;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

        smp_rmb();

        /* We could have just memset this but we would lose the
         * flexibility of making the priv area sticky
         */

        BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
        BLOCK_NUM_PKTS(pbd1) = 0;
        BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        ktime_get_real_ts64(&ts);

        h1->ts_first_pkt.ts_sec = ts.tv_sec;
        h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

        pkc1->pkblk_start = (char *)pbd1;
        pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
        BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

        pbd1->version = pkc1->version;
        pkc1->prev = pkc1->nxt_offset;
        pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

        prb_thaw_queue(pkc1);
        _prb_refresh_rx_retire_blk_timer(pkc1);

        smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
                                  struct packet_sock *po)
{
        pkc->reset_pending_on_curr_blk = 1;
        po->stats.stats3.tp_freeze_q_cnt++;
}
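
/* For illustration only (user-space, not part of this file): the queue
 * thaws once the blocking block is handed back to the kernel, which a
 * TPACKET_V3 consumer does by resetting the block's status word, e.g.:
 *
 *	struct tpacket_block_desc *pbd;		// current mmap'ed block
 *	// ... walk the packets in the block, then hand it back:
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *
 * The next __packet_lookup_frame_in_block() call then re-opens it.
 */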

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po)
{
        struct tpacket_block_desc *pbd;

        smp_rmb();

        /* 1. Get current block num */
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* 2. If this block is currently in_use then freeze the queue */
        if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
                prb_freeze_queue(pkc, po);
                return NULL;
        }

        /*
         * 3.
         * open this block and return the offset where the first packet
         * needs to get stored.
         */
        prb_open_block(pkc, pbd);
        return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po, unsigned int status)
{
        struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* retire/close the current block */
        if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
                /*
                 * Plug the case where copy_bits() is in progress on
                 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
                 * have space to copy the pkt in the current block and
                 * called prb_retire_current_block()
                 *
                 * We don't need to worry about the TMO case because
                 * the timer-handler already handled this case.
                 */
                if (!(status & TP_STATUS_BLK_TMO)) {
                        /* Waiting for skb_copy_bits to finish... */
                        write_lock(&pkc->blk_fill_in_prog_lock);
                        write_unlock(&pkc->blk_fill_in_prog_lock);
                }
                prb_close_block(pkc, pbd, po, status);
                return;
        }
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
        return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
        return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
        __releases(&pkc->blk_fill_in_prog_lock)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);

        read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        if (skb_vlan_tag_present(pkc->skb)) {
                ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
                ppd->hv1.tp_vlan_tci = 0;
                ppd->hv1.tp_vlan_tpid = 0;
                ppd->tp_status = TP_STATUS_AVAILABLE;
        }
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_padding = 0;
        prb_fill_vlan_info(pkc, ppd);

        if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
                prb_fill_rxhash(pkc, ppd);
        else
                prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
                                struct tpacket_kbdq_core *pkc,
                                struct tpacket_block_desc *pbd,
                                unsigned int len)
        __acquires(&pkc->blk_fill_in_prog_lock)
{
        struct tpacket3_hdr *ppd;

        ppd  = (struct tpacket3_hdr *)curr;
        ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
        pkc->prev = curr;
        pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_NUM_PKTS(pbd) += 1;
        read_lock(&pkc->blk_fill_in_prog_lock);
        prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            unsigned int len
                                            )
{
        struct tpacket_kbdq_core *pkc;
        struct tpacket_block_desc *pbd;
        char *curr, *end;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* Queue is frozen when user space is lagging behind */
        if (prb_queue_frozen(pkc)) {
                /*
                 * Check if the last block which caused the queue to freeze
                 * is still in_use by user-space.
                 */
                if (prb_curr_blk_in_use(pbd)) {
                        /* Can't record this packet */
                        return NULL;
                } else {
                        /*
                         * Ok, the block was released by user-space.
                         * Now let's open that block.
                         * opening a block also thaws the queue.
                         * Thawing is a side effect.
                         */
                        prb_open_block(pkc, pbd);
                }
        }

        smp_mb();
        curr = pkc->nxt_offset;
        pkc->skb = skb;
        end = (char *)pbd + pkc->kblk_size;

        /* first try the current block */
        if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /* Ok, close the current block */
        prb_retire_current_block(pkc, po, 0);

        /* Now, try to dispatch the next block */
        curr = (char *)prb_dispatch_next_block(pkc, po);
        if (curr) {
                pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /*
         * No free blocks are available. user_space hasn't caught up yet.
         * Queue was just frozen and now this packet will get dropped.
         */
        return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status, unsigned int len)
{
        char *curr = NULL;
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                curr = packet_lookup_frame(po, &po->rx_ring,
                                        po->rx_ring.head, status);
                return curr;
        case TPACKET_V3:
                return __packet_lookup_frame_in_block(po, skb, len);
        default:
                WARN(1, "TPACKET version not supported\n");
                BUG();
                return NULL;
        }
}

static void *prb_lookup_block(const struct packet_sock *po,
                              const struct packet_ring_buffer *rb,
                              unsigned int idx,
                              int status)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

        if (status != BLOCK_STATUS(pbd))
                return NULL;
        return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
        unsigned int prev;
        if (rb->prb_bdqc.kactive_blk_num)
                prev = rb->prb_bdqc.kactive_blk_num-1;
        else
                prev = rb->prb_bdqc.knum_blocks-1;
        return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
                                         struct packet_ring_buffer *rb,
                                         int status)
{
        unsigned int previous = prb_previous_blk_num(rb);
        return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
                                             struct packet_ring_buffer *rb,
                                             int status)
{
        if (po->tp_version <= TPACKET_V2)
                return packet_previous_frame(po, rb, status);

        return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
                                            struct packet_ring_buffer *rb)
{
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                return packet_increment_head(rb);
        case TPACKET_V3:
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return;
        }
}

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
        return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
        buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
        this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
        this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
        unsigned int refcnt = 0;
        int cpu;

        /* We don't use pending refcount in rx_ring. */
        if (rb->pending_refcnt == NULL)
                return 0;

        for_each_possible_cpu(cpu)
                refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

        return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
        po->rx_ring.pending_refcnt = NULL;

        po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
        if (unlikely(po->tx_ring.pending_refcnt == NULL))
                return -ENOBUFS;

        return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
        free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF    2
#define ROOM_NONE       0x0
#define ROOM_LOW        0x1
#define ROOM_NORMAL     0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = READ_ONCE(po->rx_ring.frame_max) + 1;
        idx = READ_ONCE(po->rx_ring.head);
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
        idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
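
/* Both helpers above are watermark checks. Assuming a 64-frame ring and
 * pow_off == ROOM_POW_OFF (2), the slot probed is head + 64/4, so a
 * ROOM_NORMAL verdict means at least a quarter of the ring is still owned
 * by the kernel; with pow_off == 0 the head slot itself is probed, which
 * is the weaker ROOM_LOW test.
 */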

static int __packet_rcv_has_room(const struct packet_sock *po,
                                 const struct sk_buff *skb)
{
        const struct sock *sk = &po->sk;
        int ret = ROOM_NONE;

        if (po->prot_hook.func != tpacket_rcv) {
                int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
                int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
                                   - (skb ? skb->truesize : 0);

                if (avail > (rcvbuf >> ROOM_POW_OFF))
                        return ROOM_NORMAL;
                else if (avail > 0)
                        return ROOM_LOW;
                else
                        return ROOM_NONE;
        }

        if (po->tp_version == TPACKET_V3) {
                if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_v3_has_room(po, 0))
                        ret = ROOM_LOW;
        } else {
                if (__tpacket_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_has_room(po, 0))
                        ret = ROOM_LOW;
        }

        return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        int pressure, ret;

        ret = __packet_rcv_has_room(po, skb);
        pressure = ret != ROOM_NORMAL;

        if (READ_ONCE(po->pressure) != pressure)
                WRITE_ONCE(po->pressure, pressure);

        return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
        if (READ_ONCE(po->pressure) &&
            __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
                WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_error_queue);

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive packet socket: %p\n", sk);
                return;
        }

        sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
        u32 *history = po->rollover->history;
        u32 victim, rxhash;
        int i, count = 0;

        rxhash = skb_get_hash(skb);
        for (i = 0; i < ROLLOVER_HLEN; i++)
                if (READ_ONCE(history[i]) == rxhash)
                        count++;

        victim = prandom_u32() % ROLLOVER_HLEN;

        /* Avoid dirtying the cache line if possible */
        if (READ_ONCE(history[victim]) != rxhash)
                WRITE_ONCE(history[victim], rxhash);

        return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
{
        return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        unsigned int val = atomic_inc_return(&f->rr_cur);

        return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
                                          struct sk_buff *skb,
                                          unsigned int idx, bool try_self,
                                          unsigned int num)
{
        struct packet_sock *po, *po_next, *po_skip = NULL;
        unsigned int i, j, room = ROOM_NONE;

        po = pkt_sk(rcu_dereference(f->arr[idx]));

        if (try_self) {
                room = packet_rcv_has_room(po, skb);
                if (room == ROOM_NORMAL ||
                    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
                        return idx;
                po_skip = po;
        }

        i = j = min_t(int, po->rollover->sock, num - 1);
        do {
                po_next = pkt_sk(rcu_dereference(f->arr[i]));
                if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
                    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
                        if (i != j)
                                po->rollover->sock = i;
                        atomic_long_inc(&po->rollover->num);
                        if (room == ROOM_LOW)
                                atomic_long_inc(&po->rollover->num_huge);
                        return i;
                }

                if (++i == num)
                        i = 0;
        } while (i != j);

        atomic_long_inc(&po->rollover->num_failed);
        return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        struct bpf_prog *prog;
        unsigned int ret = 0;

        rcu_read_lock();
        prog = rcu_dereference(f->bpf_prog);
        if (prog)
                ret = bpf_prog_run_clear_cb(prog, skb) % num;
        rcu_read_unlock();

        return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
        return f->flags & (flag >> 8);
}
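
/* For illustration only (user-space, not part of this file): the fanout
 * type and flags are passed in the upper 16 bits of the PACKET_FANOUT
 * argument, which is why the flag is shifted down by 8 above, e.g.:
 *
 *	int val = fanout_id |
 *		  ((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */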
1458
1459 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1460                              struct packet_type *pt, struct net_device *orig_dev)
1461 {
1462         struct packet_fanout *f = pt->af_packet_priv;
1463         unsigned int num = READ_ONCE(f->num_members);
1464         struct net *net = read_pnet(&f->net);
1465         struct packet_sock *po;
1466         unsigned int idx;
1467
1468         if (!net_eq(dev_net(dev), net) || !num) {
1469                 kfree_skb(skb);
1470                 return 0;
1471         }
1472
1473         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1474                 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1475                 if (!skb)
1476                         return 0;
1477         }
1478         switch (f->type) {
1479         case PACKET_FANOUT_HASH:
1480         default:
1481                 idx = fanout_demux_hash(f, skb, num);
1482                 break;
1483         case PACKET_FANOUT_LB:
1484                 idx = fanout_demux_lb(f, skb, num);
1485                 break;
1486         case PACKET_FANOUT_CPU:
1487                 idx = fanout_demux_cpu(f, skb, num);
1488                 break;
1489         case PACKET_FANOUT_RND:
1490                 idx = fanout_demux_rnd(f, skb, num);
1491                 break;
1492         case PACKET_FANOUT_QM:
1493                 idx = fanout_demux_qm(f, skb, num);
1494                 break;
1495         case PACKET_FANOUT_ROLLOVER:
1496                 idx = fanout_demux_rollover(f, skb, 0, false, num);
1497                 break;
1498         case PACKET_FANOUT_CBPF:
1499         case PACKET_FANOUT_EBPF:
1500                 idx = fanout_demux_bpf(f, skb, num);
1501                 break;
1502         }
1503
1504         if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1505                 idx = fanout_demux_rollover(f, skb, idx, true, num);
1506
1507         po = pkt_sk(rcu_dereference(f->arr[idx]));
1508         return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1509 }
1510
1511 DEFINE_MUTEX(fanout_mutex);
1512 EXPORT_SYMBOL_GPL(fanout_mutex);
1513 static LIST_HEAD(fanout_list);
1514 static u16 fanout_next_id;
1515
1516 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1517 {
1518         struct packet_fanout *f = po->fanout;
1519
1520         spin_lock(&f->lock);
1521         rcu_assign_pointer(f->arr[f->num_members], sk);
1522         smp_wmb();
1523         f->num_members++;
1524         if (f->num_members == 1)
1525                 dev_add_pack(&f->prot_hook);
1526         spin_unlock(&f->lock);
1527 }
1528
1529 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1530 {
1531         struct packet_fanout *f = po->fanout;
1532         int i;
1533
1534         spin_lock(&f->lock);
1535         for (i = 0; i < f->num_members; i++) {
1536                 if (rcu_dereference_protected(f->arr[i],
1537                                               lockdep_is_held(&f->lock)) == sk)
1538                         break;
1539         }
1540         BUG_ON(i >= f->num_members);
1541         rcu_assign_pointer(f->arr[i],
1542                            rcu_dereference_protected(f->arr[f->num_members - 1],
1543                                                      lockdep_is_held(&f->lock)));
1544         f->num_members--;
1545         if (f->num_members == 0)
1546                 __dev_remove_pack(&f->prot_hook);
1547         spin_unlock(&f->lock);
1548 }
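
/* Editor's note: __fanout_unlink() removes a member by swapping the last
 * array entry into the vacated slot instead of shifting, so f->arr[] stays
 * dense and "x % num_members" demuxing keeps working; concurrent readers
 * observe either the old or the new pointer via RCU.
 */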
1549
1550 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1551 {
1552         if (sk->sk_family != PF_PACKET)
1553                 return false;
1554
1555         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1556 }
1557
1558 static void fanout_init_data(struct packet_fanout *f)
1559 {
1560         switch (f->type) {
1561         case PACKET_FANOUT_LB:
1562                 atomic_set(&f->rr_cur, 0);
1563                 break;
1564         case PACKET_FANOUT_CBPF:
1565         case PACKET_FANOUT_EBPF:
1566                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1567                 break;
1568         }
1569 }
1570
1571 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1572 {
1573         struct bpf_prog *old;
1574
1575         spin_lock(&f->lock);
1576         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1577         rcu_assign_pointer(f->bpf_prog, new);
1578         spin_unlock(&f->lock);
1579
1580         if (old) {
1581                 synchronize_net();
1582                 bpf_prog_destroy(old);
1583         }
1584 }
1585
1586 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1587                                 unsigned int len)
1588 {
1589         struct bpf_prog *new;
1590         struct sock_fprog fprog;
1591         int ret;
1592
1593         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1594                 return -EPERM;
1595
1596         ret = copy_bpf_fprog_from_user(&fprog, data, len);
1597         if (ret)
1598                 return ret;
1599
1600         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1601         if (ret)
1602                 return ret;
1603
1604         __fanout_set_data_bpf(po->fanout, new);
1605         return 0;
1606 }
1607
1608 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1609                                 unsigned int len)
1610 {
1611         struct bpf_prog *new;
1612         u32 fd;
1613
1614         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1615                 return -EPERM;
1616         if (len != sizeof(fd))
1617                 return -EINVAL;
1618         if (copy_from_sockptr(&fd, data, len))
1619                 return -EFAULT;
1620
1621         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1622         if (IS_ERR(new))
1623                 return PTR_ERR(new);
1624
1625         __fanout_set_data_bpf(po->fanout, new);
1626         return 0;
1627 }
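
/*
 * Illustrative userspace sketch (editor's addition): once a
 * PACKET_FANOUT_EBPF group exists, the demux program is installed by
 * passing a BPF_PROG_TYPE_SOCKET_FILTER program fd via PACKET_FANOUT_DATA;
 * its return value picks the member socket (modulo the member count in
 * fanout_demux_bpf() above). "fd" and "prog_fd" are placeholders.
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 */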
1628
1629 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1630                            unsigned int len)
1631 {
1632         switch (po->fanout->type) {
1633         case PACKET_FANOUT_CBPF:
1634                 return fanout_set_data_cbpf(po, data, len);
1635         case PACKET_FANOUT_EBPF:
1636                 return fanout_set_data_ebpf(po, data, len);
1637         default:
1638                 return -EINVAL;
1639         }
1640 }
1641
1642 static void fanout_release_data(struct packet_fanout *f)
1643 {
1644         switch (f->type) {
1645         case PACKET_FANOUT_CBPF:
1646         case PACKET_FANOUT_EBPF:
1647                 __fanout_set_data_bpf(f, NULL);
1648         }
1649 }
1650
1651 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1652 {
1653         struct packet_fanout *f;
1654
1655         list_for_each_entry(f, &fanout_list, list) {
1656                 if (f->id == candidate_id &&
1657                     read_pnet(&f->net) == sock_net(sk)) {
1658                         return false;
1659                 }
1660         }
1661         return true;
1662 }
1663
1664 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1665 {
1666         u16 id = fanout_next_id;
1667
1668         do {
1669                 if (__fanout_id_is_free(sk, id)) {
1670                         *new_id = id;
1671                         fanout_next_id = id + 1;
1672                         return true;
1673                 }
1674
1675                 id++;
1676         } while (id != fanout_next_id);
1677
1678         return false;
1679 }
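
/* Editor's note: the do/while above walks the entire 16-bit id space,
 * starting at fanout_next_id and wrapping via u16 arithmetic; it fails
 * only if every id is already taken in this network namespace.
 */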
1680
1681 static int fanout_add(struct sock *sk, struct fanout_args *args)
1682 {
1683         struct packet_rollover *rollover = NULL;
1684         struct packet_sock *po = pkt_sk(sk);
1685         u16 type_flags = args->type_flags;
1686         struct packet_fanout *f, *match;
1687         u8 type = type_flags & 0xff;
1688         u8 flags = type_flags >> 8;
1689         u16 id = args->id;
1690         int err;
1691
1692         switch (type) {
1693         case PACKET_FANOUT_ROLLOVER:
1694                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1695                         return -EINVAL;
1696                 break;
1697         case PACKET_FANOUT_HASH:
1698         case PACKET_FANOUT_LB:
1699         case PACKET_FANOUT_CPU:
1700         case PACKET_FANOUT_RND:
1701         case PACKET_FANOUT_QM:
1702         case PACKET_FANOUT_CBPF:
1703         case PACKET_FANOUT_EBPF:
1704                 break;
1705         default:
1706                 return -EINVAL;
1707         }
1708
1709         mutex_lock(&fanout_mutex);
1710
1711         err = -EALREADY;
1712         if (po->fanout)
1713                 goto out;
1714
1715         if (type == PACKET_FANOUT_ROLLOVER ||
1716             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1717                 err = -ENOMEM;
1718                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1719                 if (!rollover)
1720                         goto out;
1721                 atomic_long_set(&rollover->num, 0);
1722                 atomic_long_set(&rollover->num_huge, 0);
1723                 atomic_long_set(&rollover->num_failed, 0);
1724         }
1725
1726         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1727                 if (id != 0) {
1728                         err = -EINVAL;
1729                         goto out;
1730                 }
1731                 if (!fanout_find_new_id(sk, &id)) {
1732                         err = -ENOMEM;
1733                         goto out;
1734                 }
1735                 /* ephemeral flag for the first socket in the group: drop it */
1736                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1737         }
1738
1739         match = NULL;
1740         list_for_each_entry(f, &fanout_list, list) {
1741                 if (f->id == id &&
1742                     read_pnet(&f->net) == sock_net(sk)) {
1743                         match = f;
1744                         break;
1745                 }
1746         }
1747         err = -EINVAL;
1748         if (match) {
1749                 if (match->flags != flags)
1750                         goto out;
1751                 if (args->max_num_members &&
1752                     args->max_num_members != match->max_num_members)
1753                         goto out;
1754         } else {
1755                 if (args->max_num_members > PACKET_FANOUT_MAX)
1756                         goto out;
1757                 if (!args->max_num_members)
1758                         /* legacy PACKET_FANOUT_MAX */
1759                         args->max_num_members = 256;
1760                 err = -ENOMEM;
1761                 match = kvzalloc(struct_size(match, arr, args->max_num_members),
1762                                  GFP_KERNEL);
1763                 if (!match)
1764                         goto out;
1765                 write_pnet(&match->net, sock_net(sk));
1766                 match->id = id;
1767                 match->type = type;
1768                 match->flags = flags;
1769                 INIT_LIST_HEAD(&match->list);
1770                 spin_lock_init(&match->lock);
1771                 refcount_set(&match->sk_ref, 0);
1772                 fanout_init_data(match);
1773                 match->prot_hook.type = po->prot_hook.type;
1774                 match->prot_hook.dev = po->prot_hook.dev;
1775                 match->prot_hook.func = packet_rcv_fanout;
1776                 match->prot_hook.af_packet_priv = match;
1777                 match->prot_hook.af_packet_net = read_pnet(&match->net);
1778                 match->prot_hook.id_match = match_fanout_group;
1779                 match->max_num_members = args->max_num_members;
1780                 list_add(&match->list, &fanout_list);
1781         }
1782         err = -EINVAL;
1783
1784         spin_lock(&po->bind_lock);
1785         if (po->running &&
1786             match->type == type &&
1787             match->prot_hook.type == po->prot_hook.type &&
1788             match->prot_hook.dev == po->prot_hook.dev) {
1789                 err = -ENOSPC;
1790                 if (refcount_read(&match->sk_ref) < match->max_num_members) {
1791                         __dev_remove_pack(&po->prot_hook);
1792
1793                         /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1794                         WRITE_ONCE(po->fanout, match);
1795
1796                         po->rollover = rollover;
1797                         rollover = NULL;
1798                         refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1799                         __fanout_link(sk, po);
1800                         err = 0;
1801                 }
1802         }
1803         spin_unlock(&po->bind_lock);
1804
1805         if (err && !refcount_read(&match->sk_ref)) {
1806                 list_del(&match->list);
1807                 kvfree(match);
1808         }
1809
1810 out:
1811         kfree(rollover);
1812         mutex_unlock(&fanout_mutex);
1813         return err;
1814 }
1815
1816 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1817  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1818  * It is the responsibility of the caller to call fanout_release_data() and
1819  * free the returned packet_fanout (after synchronize_net()).
1820  */
1821 static struct packet_fanout *fanout_release(struct sock *sk)
1822 {
1823         struct packet_sock *po = pkt_sk(sk);
1824         struct packet_fanout *f;
1825
1826         mutex_lock(&fanout_mutex);
1827         f = po->fanout;
1828         if (f) {
1829                 po->fanout = NULL;
1830
1831                 if (refcount_dec_and_test(&f->sk_ref))
1832                         list_del(&f->list);
1833                 else
1834                         f = NULL;
1835         }
1836         mutex_unlock(&fanout_mutex);
1837
1838         return f;
1839 }
1840
1841 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1842                                           struct sk_buff *skb)
1843 {
1844         /* Earlier code assumed this would be a VLAN pkt, double-check
1845          * this now that we have the actual packet in hand. We can only
1846          * do this check on Ethernet devices.
1847          */
1848         if (unlikely(dev->type != ARPHRD_ETHER))
1849                 return false;
1850
1851         skb_reset_mac_header(skb);
1852         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1853 }
1854
1855 static const struct proto_ops packet_ops;
1856
1857 static const struct proto_ops packet_ops_spkt;
1858
1859 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1860                            struct packet_type *pt, struct net_device *orig_dev)
1861 {
1862         struct sock *sk;
1863         struct sockaddr_pkt *spkt;
1864
1865         /*
1866          *      When we registered the protocol we saved the socket in the data
1867          *      field for just this event.
1868          */
1869
1870         sk = pt->af_packet_priv;
1871
1872         /*
1873          *      Yank back the headers [hope the device set this
1874          *      right or kerboom...]
1875          *
1876          *      Incoming packets have ll header pulled,
1877          *      push it back.
1878          *
1879          *      For outgoing ones skb->data == skb_mac_header(skb),
1880          *      so this procedure is a no-op.
1881          */
1882
1883         if (skb->pkt_type == PACKET_LOOPBACK)
1884                 goto out;
1885
1886         if (!net_eq(dev_net(dev), sock_net(sk)))
1887                 goto out;
1888
1889         skb = skb_share_check(skb, GFP_ATOMIC);
1890         if (skb == NULL)
1891                 goto oom;
1892
1893         /* drop any routing info */
1894         skb_dst_drop(skb);
1895
1896         /* drop conntrack reference */
1897         nf_reset_ct(skb);
1898
1899         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1900
1901         skb_push(skb, skb->data - skb_mac_header(skb));
1902
1903         /*
1904          *      The SOCK_PACKET socket receives _all_ frames.
1905          */
1906
1907         spkt->spkt_family = dev->type;
1908         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1909         spkt->spkt_protocol = skb->protocol;
1910
1911         /*
1912          *      Charge the memory to the socket. This is done specifically
1913          *      to prevent a socket from using up all the memory.
1914          */
1915
1916         if (sock_queue_rcv_skb(sk, skb) == 0)
1917                 return 0;
1918
1919 out:
1920         kfree_skb(skb);
1921 oom:
1922         return 0;
1923 }
1924
1925 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1926 {
1927         if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1928             sock->type == SOCK_RAW) {
1929                 skb_reset_mac_header(skb);
1930                 skb->protocol = dev_parse_header_protocol(skb);
1931         }
1932
1933         skb_probe_transport_header(skb);
1934 }
1935
1936 /*
1937  *      Output a raw packet to a device layer. This bypasses all the other
1938  *      protocol layers and you must therefore supply it with a complete frame
1939  */
1940
1941 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1942                                size_t len)
1943 {
1944         struct sock *sk = sock->sk;
1945         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1946         struct sk_buff *skb = NULL;
1947         struct net_device *dev;
1948         struct sockcm_cookie sockc;
1949         __be16 proto = 0;
1950         int err;
1951         int extra_len = 0;
1952
1953         /*
1954          *      Get and verify the address.
1955          */
1956
1957         if (saddr) {
1958                 if (msg->msg_namelen < sizeof(struct sockaddr))
1959                         return -EINVAL;
1960                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1961                         proto = saddr->spkt_protocol;
1962         } else
1963                 return -ENOTCONN;       /* a SOCK_PACKET send requires an address */
1964
1965         /*
1966          *      Find the device first to size-check it.
1967          */
1968
1969         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1970 retry:
1971         rcu_read_lock();
1972         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1973         err = -ENODEV;
1974         if (dev == NULL)
1975                 goto out_unlock;
1976
1977         err = -ENETDOWN;
1978         if (!(dev->flags & IFF_UP))
1979                 goto out_unlock;
1980
1981         /*
1982          * You may not queue a frame bigger than the mtu. This is the lowest level
1983          * raw protocol and you must do your own fragmentation at this level.
1984          */
1985
1986         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1987                 if (!netif_supports_nofcs(dev)) {
1988                         err = -EPROTONOSUPPORT;
1989                         goto out_unlock;
1990                 }
1991                 extra_len = 4; /* We're doing our own CRC */
1992         }
1993
1994         err = -EMSGSIZE;
1995         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1996                 goto out_unlock;
1997
1998         if (!skb) {
1999                 size_t reserved = LL_RESERVED_SPACE(dev);
2000                 int tlen = dev->needed_tailroom;
2001                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2002
2003                 rcu_read_unlock();
2004                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2005                 if (skb == NULL)
2006                         return -ENOBUFS;
2007                 /* FIXME: Save some space for broken drivers that write a hard
2008                  * header at transmission time by themselves. PPP is the notable
2009                  * one here. This should really be fixed at the driver level.
2010                  */
2011                 skb_reserve(skb, reserved);
2012                 skb_reset_network_header(skb);
2013
2014                 /* Try to align data part correctly */
2015                 if (hhlen) {
2016                         skb->data -= hhlen;
2017                         skb->tail -= hhlen;
2018                         if (len < hhlen)
2019                                 skb_reset_network_header(skb);
2020                 }
2021                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
2022                 if (err)
2023                         goto out_free;
2024                 goto retry;
2025         }
2026
2027         if (!dev_validate_header(dev, skb->data, len)) {
2028                 err = -EINVAL;
2029                 goto out_unlock;
2030         }
2031         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2032             !packet_extra_vlan_len_allowed(dev, skb)) {
2033                 err = -EMSGSIZE;
2034                 goto out_unlock;
2035         }
2036
2037         sockcm_init(&sockc, sk);
2038         if (msg->msg_controllen) {
2039                 err = sock_cmsg_send(sk, msg, &sockc);
2040                 if (unlikely(err))
2041                         goto out_unlock;
2042         }
2043
2044         skb->protocol = proto;
2045         skb->dev = dev;
2046         skb->priority = sk->sk_priority;
2047         skb->mark = sk->sk_mark;
2048         skb->tstamp = sockc.transmit_time;
2049
2050         skb_setup_tx_timestamp(skb, sockc.tsflags);
2051
2052         if (unlikely(extra_len == 4))
2053                 skb->no_fcs = 1;
2054
2055         packet_parse_headers(skb, sock);
2056
2057         dev_queue_xmit(skb);
2058         rcu_read_unlock();
2059         return len;
2060
2061 out_unlock:
2062         rcu_read_unlock();
2063 out_free:
2064         kfree_skb(skb);
2065         return err;
2066 }
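
/*
 * Illustrative userspace sketch (editor's addition, obsolete SOCK_PACKET
 * API shown only for context): the destination device is named in
 * sockaddr_pkt and the buffer must already hold a complete link-layer
 * frame. "fd", "frame" and "frame_len" are placeholders.
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */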
2067
2068 static unsigned int run_filter(struct sk_buff *skb,
2069                                const struct sock *sk,
2070                                unsigned int res)
2071 {
2072         struct sk_filter *filter;
2073
2074         rcu_read_lock();
2075         filter = rcu_dereference(sk->sk_filter);
2076         if (filter != NULL)
2077                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2078         rcu_read_unlock();
2079
2080         return res;
2081 }
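
/*
 * Illustrative userspace sketch (editor's addition): the filter consulted
 * above is the one attached with SO_ATTACH_FILTER (or SO_ATTACH_BPF); a
 * return of 0 drops the packet, and a smaller return truncates the
 * snapshot (see the snaplen handling in packet_rcv()/tpacket_rcv()).
 * "insns" stands for real cBPF instructions.
 *
 *	struct sock_fprog prog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */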
2082
2083 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2084                            size_t *len)
2085 {
2086         struct virtio_net_hdr vnet_hdr;
2087
2088         if (*len < sizeof(vnet_hdr))
2089                 return -EINVAL;
2090         *len -= sizeof(vnet_hdr);
2091
2092         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2093                 return -EINVAL;
2094
2095         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2096 }
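
/* Editor's note: when the socket enabled PACKET_VNET_HDR,
 * packet_recvmsg() calls the helper above first, so every received
 * message is prefixed with a struct virtio_net_hdr describing offload
 * state; *len is reduced before the packet bytes are copied.
 */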
2097
2098 /*
2099  * This function performs lazy skb cloning in the hope that most
2100  * packets are discarded by BPF.
2101  *
2102  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2103  * and skb->cb are mangled. It works because (and until) packets
2104  * falling here are owned by the current CPU. Output packets are cloned
2105  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2106  * sequentially, so that if we return skb to original state on exit,
2107  * we will not harm anyone.
2108  */
2109
2110 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2111                       struct packet_type *pt, struct net_device *orig_dev)
2112 {
2113         struct sock *sk;
2114         struct sockaddr_ll *sll;
2115         struct packet_sock *po;
2116         u8 *skb_head = skb->data;
2117         int skb_len = skb->len;
2118         unsigned int snaplen, res;
2119         bool is_drop_n_account = false;
2120
2121         if (skb->pkt_type == PACKET_LOOPBACK)
2122                 goto drop;
2123
2124         sk = pt->af_packet_priv;
2125         po = pkt_sk(sk);
2126
2127         if (!net_eq(dev_net(dev), sock_net(sk)))
2128                 goto drop;
2129
2130         skb->dev = dev;
2131
2132         if (dev_has_header(dev)) {
2133                 /* The device has an explicit notion of ll header,
2134                  * exported to higher levels.
2135                  *
2136                  * Otherwise, the device hides details of its frame
2137                  * structure, so that corresponding packet head is
2138                  * never delivered to user.
2139                  */
2140                 if (sk->sk_type != SOCK_DGRAM)
2141                         skb_push(skb, skb->data - skb_mac_header(skb));
2142                 else if (skb->pkt_type == PACKET_OUTGOING) {
2143                         /* Special case: outgoing packets have ll header at head */
2144                         skb_pull(skb, skb_network_offset(skb));
2145                 }
2146         }
2147
2148         snaplen = skb->len;
2149
2150         res = run_filter(skb, sk, snaplen);
2151         if (!res)
2152                 goto drop_n_restore;
2153         if (snaplen > res)
2154                 snaplen = res;
2155
2156         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2157                 goto drop_n_acct;
2158
2159         if (skb_shared(skb)) {
2160                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2161                 if (nskb == NULL)
2162                         goto drop_n_acct;
2163
2164                 if (skb_head != skb->data) {
2165                         skb->data = skb_head;
2166                         skb->len = skb_len;
2167                 }
2168                 consume_skb(skb);
2169                 skb = nskb;
2170         }
2171
2172         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2173
2174         sll = &PACKET_SKB_CB(skb)->sa.ll;
2175         sll->sll_hatype = dev->type;
2176         sll->sll_pkttype = skb->pkt_type;
2177         if (unlikely(po->origdev))
2178                 sll->sll_ifindex = orig_dev->ifindex;
2179         else
2180                 sll->sll_ifindex = dev->ifindex;
2181
2182         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2183
2184         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2185          * Use their space for storing the original skb length.
2186          */
2187         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2188
2189         if (pskb_trim(skb, snaplen))
2190                 goto drop_n_acct;
2191
2192         skb_set_owner_r(skb, sk);
2193         skb->dev = NULL;
2194         skb_dst_drop(skb);
2195
2196         /* drop conntrack reference */
2197         nf_reset_ct(skb);
2198
2199         spin_lock(&sk->sk_receive_queue.lock);
2200         po->stats.stats1.tp_packets++;
2201         sock_skb_set_dropcount(sk, skb);
2202         skb_clear_delivery_time(skb);
2203         __skb_queue_tail(&sk->sk_receive_queue, skb);
2204         spin_unlock(&sk->sk_receive_queue.lock);
2205         sk->sk_data_ready(sk);
2206         return 0;
2207
2208 drop_n_acct:
2209         is_drop_n_account = true;
2210         atomic_inc(&po->tp_drops);
2211         atomic_inc(&sk->sk_drops);
2212
2213 drop_n_restore:
2214         if (skb_head != skb->data && skb_shared(skb)) {
2215                 skb->data = skb_head;
2216                 skb->len = skb_len;
2217         }
2218 drop:
2219         if (!is_drop_n_account)
2220                 consume_skb(skb);
2221         else
2222                 kfree_skb(skb);
2223         return 0;
2224 }
2225
2226 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2227                        struct packet_type *pt, struct net_device *orig_dev)
2228 {
2229         struct sock *sk;
2230         struct packet_sock *po;
2231         struct sockaddr_ll *sll;
2232         union tpacket_uhdr h;
2233         u8 *skb_head = skb->data;
2234         int skb_len = skb->len;
2235         unsigned int snaplen, res;
2236         unsigned long status = TP_STATUS_USER;
2237         unsigned short macoff, hdrlen;
2238         unsigned int netoff;
2239         struct sk_buff *copy_skb = NULL;
2240         struct timespec64 ts;
2241         __u32 ts_status;
2242         bool is_drop_n_account = false;
2243         unsigned int slot_id = 0;
2244         bool do_vnet = false;
2245
2246         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2247          * We may add members to them up to the current aligned size without forcing
2248          * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2249          */
2250         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2251         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2252
2253         if (skb->pkt_type == PACKET_LOOPBACK)
2254                 goto drop;
2255
2256         sk = pt->af_packet_priv;
2257         po = pkt_sk(sk);
2258
2259         if (!net_eq(dev_net(dev), sock_net(sk)))
2260                 goto drop;
2261
2262         if (dev_has_header(dev)) {
2263                 if (sk->sk_type != SOCK_DGRAM)
2264                         skb_push(skb, skb->data - skb_mac_header(skb));
2265                 else if (skb->pkt_type == PACKET_OUTGOING) {
2266                         /* Special case: outgoing packets have ll header at head */
2267                         skb_pull(skb, skb_network_offset(skb));
2268                 }
2269         }
2270
2271         snaplen = skb->len;
2272
2273         res = run_filter(skb, sk, snaplen);
2274         if (!res)
2275                 goto drop_n_restore;
2276
2277         /* If we are flooded, just give up */
2278         if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2279                 atomic_inc(&po->tp_drops);
2280                 goto drop_n_restore;
2281         }
2282
2283         if (skb->ip_summed == CHECKSUM_PARTIAL)
2284                 status |= TP_STATUS_CSUMNOTREADY;
2285         else if (skb->pkt_type != PACKET_OUTGOING &&
2286                  (skb->ip_summed == CHECKSUM_COMPLETE ||
2287                   skb_csum_unnecessary(skb)))
2288                 status |= TP_STATUS_CSUM_VALID;
2289
2290         if (snaplen > res)
2291                 snaplen = res;
2292
2293         if (sk->sk_type == SOCK_DGRAM) {
2294                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2295                                   po->tp_reserve;
2296         } else {
2297                 unsigned int maclen = skb_network_offset(skb);
2298                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2299                                        (maclen < 16 ? 16 : maclen)) +
2300                                        po->tp_reserve;
2301                 if (po->has_vnet_hdr) {
2302                         netoff += sizeof(struct virtio_net_hdr);
2303                         do_vnet = true;
2304                 }
2305                 macoff = netoff - maclen;
2306         }
2307         if (netoff > USHRT_MAX) {
2308                 atomic_inc(&po->tp_drops);
2309                 goto drop_n_restore;
2310         }
2311         if (po->tp_version <= TPACKET_V2) {
2312                 if (macoff + snaplen > po->rx_ring.frame_size) {
2313                         if (po->copy_thresh &&
2314                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2315                                 if (skb_shared(skb)) {
2316                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2317                                 } else {
2318                                         copy_skb = skb_get(skb);
2319                                         skb_head = skb->data;
2320                                 }
2321                                 if (copy_skb) {
2322                                         memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2323                                                sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2324                                         skb_set_owner_r(copy_skb, sk);
2325                                 }
2326                         }
2327                         snaplen = po->rx_ring.frame_size - macoff;
2328                         if ((int)snaplen < 0) {
2329                                 snaplen = 0;
2330                                 do_vnet = false;
2331                         }
2332                 }
2333         } else if (unlikely(macoff + snaplen >
2334                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2335                 u32 nval;
2336
2337                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2338                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2339                             snaplen, nval, macoff);
2340                 snaplen = nval;
2341                 if (unlikely((int)snaplen < 0)) {
2342                         snaplen = 0;
2343                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2344                         do_vnet = false;
2345                 }
2346         }
2347         spin_lock(&sk->sk_receive_queue.lock);
2348         h.raw = packet_current_rx_frame(po, skb,
2349                                         TP_STATUS_KERNEL, (macoff+snaplen));
2350         if (!h.raw)
2351                 goto drop_n_account;
2352
2353         if (po->tp_version <= TPACKET_V2) {
2354                 slot_id = po->rx_ring.head;
2355                 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2356                         goto drop_n_account;
2357                 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2358         }
2359
2360         if (do_vnet &&
2361             virtio_net_hdr_from_skb(skb, h.raw + macoff -
2362                                     sizeof(struct virtio_net_hdr),
2363                                     vio_le(), true, 0)) {
2364                 if (po->tp_version == TPACKET_V3)
2365                         prb_clear_blk_fill_status(&po->rx_ring);
2366                 goto drop_n_account;
2367         }
2368
2369         if (po->tp_version <= TPACKET_V2) {
2370                 packet_increment_rx_head(po, &po->rx_ring);
2371         /*
2372          * LOSING will be reported until you read the stats,
2373          * because it's COR - Clear On Read.
2374          * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2375          * at packet level.
2376          */
2377                 if (atomic_read(&po->tp_drops))
2378                         status |= TP_STATUS_LOSING;
2379         }
2380
2381         po->stats.stats1.tp_packets++;
2382         if (copy_skb) {
2383                 status |= TP_STATUS_COPY;
2384                 skb_clear_delivery_time(copy_skb);
2385                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2386         }
2387         spin_unlock(&sk->sk_receive_queue.lock);
2388
2389         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2390
2391         /* Always timestamp; prefer an existing software timestamp taken
2392          * closer to the time of capture.
2393          */
2394         ts_status = tpacket_get_timestamp(skb, &ts,
2395                                           po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2396         if (!ts_status)
2397                 ktime_get_real_ts64(&ts);
2398
2399         status |= ts_status;
2400
2401         switch (po->tp_version) {
2402         case TPACKET_V1:
2403                 h.h1->tp_len = skb->len;
2404                 h.h1->tp_snaplen = snaplen;
2405                 h.h1->tp_mac = macoff;
2406                 h.h1->tp_net = netoff;
2407                 h.h1->tp_sec = ts.tv_sec;
2408                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2409                 hdrlen = sizeof(*h.h1);
2410                 break;
2411         case TPACKET_V2:
2412                 h.h2->tp_len = skb->len;
2413                 h.h2->tp_snaplen = snaplen;
2414                 h.h2->tp_mac = macoff;
2415                 h.h2->tp_net = netoff;
2416                 h.h2->tp_sec = ts.tv_sec;
2417                 h.h2->tp_nsec = ts.tv_nsec;
2418                 if (skb_vlan_tag_present(skb)) {
2419                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2420                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2421                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2422                 } else {
2423                         h.h2->tp_vlan_tci = 0;
2424                         h.h2->tp_vlan_tpid = 0;
2425                 }
2426                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2427                 hdrlen = sizeof(*h.h2);
2428                 break;
2429         case TPACKET_V3:
2430                 /* tp_next_offset and the vlan fields are already populated
2431                  * above, so DON'T clear them here.
2432                  */
2433                 h.h3->tp_status |= status;
2434                 h.h3->tp_len = skb->len;
2435                 h.h3->tp_snaplen = snaplen;
2436                 h.h3->tp_mac = macoff;
2437                 h.h3->tp_net = netoff;
2438                 h.h3->tp_sec  = ts.tv_sec;
2439                 h.h3->tp_nsec = ts.tv_nsec;
2440                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2441                 hdrlen = sizeof(*h.h3);
2442                 break;
2443         default:
2444                 BUG();
2445         }
2446
2447         sll = h.raw + TPACKET_ALIGN(hdrlen);
2448         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2449         sll->sll_family = AF_PACKET;
2450         sll->sll_hatype = dev->type;
2451         sll->sll_protocol = skb->protocol;
2452         sll->sll_pkttype = skb->pkt_type;
2453         if (unlikely(po->origdev))
2454                 sll->sll_ifindex = orig_dev->ifindex;
2455         else
2456                 sll->sll_ifindex = dev->ifindex;
2457
2458         smp_mb();
2459
2460 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2461         if (po->tp_version <= TPACKET_V2) {
2462                 u8 *start, *end;
2463
2464                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2465                                         macoff + snaplen);
2466
2467                 for (start = h.raw; start < end; start += PAGE_SIZE)
2468                         flush_dcache_page(pgv_to_page(start));
2469         }
2470         smp_wmb();
2471 #endif
2472
2473         if (po->tp_version <= TPACKET_V2) {
2474                 spin_lock(&sk->sk_receive_queue.lock);
2475                 __packet_set_status(po, h.raw, status);
2476                 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2477                 spin_unlock(&sk->sk_receive_queue.lock);
2478                 sk->sk_data_ready(sk);
2479         } else if (po->tp_version == TPACKET_V3) {
2480                 prb_clear_blk_fill_status(&po->rx_ring);
2481         }
2482
2483 drop_n_restore:
2484         if (skb_head != skb->data && skb_shared(skb)) {
2485                 skb->data = skb_head;
2486                 skb->len = skb_len;
2487         }
2488 drop:
2489         if (!is_drop_n_account)
2490                 consume_skb(skb);
2491         else
2492                 kfree_skb(skb);
2493         return 0;
2494
2495 drop_n_account:
2496         spin_unlock(&sk->sk_receive_queue.lock);
2497         atomic_inc(&po->tp_drops);
2498         is_drop_n_account = true;
2499
2500         sk->sk_data_ready(sk);
2501         kfree_skb(copy_skb);
2502         goto drop_n_restore;
2503 }
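
/*
 * Illustrative userspace sketch (editor's addition, barriers omitted):
 * consuming a TPACKET_V2 PACKET_RX_RING slot filled by tpacket_rcv()
 * above. The kernel flips tp_status to TP_STATUS_USER; userspace reads
 * the frame at tp_mac and returns the slot with TP_STATUS_KERNEL.
 * "ring", "i", "frame_size", "pfd" and handle() are placeholders.
 *
 *	struct tpacket2_hdr *hdr = ring + i * frame_size;
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	handle((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	hdr->tp_status = TP_STATUS_KERNEL;
 */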
2504
2505 static void tpacket_destruct_skb(struct sk_buff *skb)
2506 {
2507         struct packet_sock *po = pkt_sk(skb->sk);
2508
2509         if (likely(po->tx_ring.pg_vec)) {
2510                 void *ph;
2511                 __u32 ts;
2512
2513                 ph = skb_zcopy_get_nouarg(skb);
2514                 packet_dec_pending(&po->tx_ring);
2515
2516                 ts = __packet_set_timestamp(po, ph, skb);
2517                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2518
2519                 if (!packet_read_pending(&po->tx_ring))
2520                         complete(&po->skb_completion);
2521         }
2522
2523         sock_wfree(skb);
2524 }
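
/* Editor's note: the completion signalled above pairs with the
 * wait_for_completion_interruptible_timeout() in tpacket_snd(); once the
 * destructor of the last pending frame runs, a blocked sender may scan
 * the ring for new TP_STATUS_SEND_REQUEST slots again.
 */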
2525
2526 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2527 {
2528         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2529             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2530              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2531               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2532                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2533                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2534                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2535
2536         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2537                 return -EINVAL;
2538
2539         return 0;
2540 }
2541
2542 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2543                                  struct virtio_net_hdr *vnet_hdr)
2544 {
2545         if (*len < sizeof(*vnet_hdr))
2546                 return -EINVAL;
2547         *len -= sizeof(*vnet_hdr);
2548
2549         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2550                 return -EFAULT;
2551
2552         return __packet_snd_vnet_parse(vnet_hdr, *len);
2553 }
2554
2555 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2556                 void *frame, struct net_device *dev, void *data, int tp_len,
2557                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2558                 const struct sockcm_cookie *sockc)
2559 {
2560         union tpacket_uhdr ph;
2561         int to_write, offset, len, nr_frags, len_max;
2562         struct socket *sock = po->sk.sk_socket;
2563         struct page *page;
2564         int err;
2565
2566         ph.raw = frame;
2567
2568         skb->protocol = proto;
2569         skb->dev = dev;
2570         skb->priority = po->sk.sk_priority;
2571         skb->mark = po->sk.sk_mark;
2572         skb->tstamp = sockc->transmit_time;
2573         skb_setup_tx_timestamp(skb, sockc->tsflags);
2574         skb_zcopy_set_nouarg(skb, ph.raw);
2575
2576         skb_reserve(skb, hlen);
2577         skb_reset_network_header(skb);
2578
2579         to_write = tp_len;
2580
2581         if (sock->type == SOCK_DGRAM) {
2582                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2583                                 NULL, tp_len);
2584                 if (unlikely(err < 0))
2585                         return -EINVAL;
2586         } else if (copylen) {
2587                 int hdrlen = min_t(int, copylen, tp_len);
2588
2589                 skb_push(skb, dev->hard_header_len);
2590                 skb_put(skb, copylen - dev->hard_header_len);
2591                 err = skb_store_bits(skb, 0, data, hdrlen);
2592                 if (unlikely(err))
2593                         return err;
2594                 if (!dev_validate_header(dev, skb->data, hdrlen))
2595                         return -EINVAL;
2596
2597                 data += hdrlen;
2598                 to_write -= hdrlen;
2599         }
2600
2601         offset = offset_in_page(data);
2602         len_max = PAGE_SIZE - offset;
2603         len = ((to_write > len_max) ? len_max : to_write);
2604
2605         skb->data_len = to_write;
2606         skb->len += to_write;
2607         skb->truesize += to_write;
2608         refcount_add(to_write, &po->sk.sk_wmem_alloc);
2609
2610         while (likely(to_write)) {
2611                 nr_frags = skb_shinfo(skb)->nr_frags;
2612
2613                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2614                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2615                                MAX_SKB_FRAGS);
2616                         return -EFAULT;
2617                 }
2618
2619                 page = pgv_to_page(data);
2620                 data += len;
2621                 flush_dcache_page(page);
2622                 get_page(page);
2623                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2624                 to_write -= len;
2625                 offset = 0;
2626                 len_max = PAGE_SIZE;
2627                 len = ((to_write > len_max) ? len_max : to_write);
2628         }
2629
2630         packet_parse_headers(skb, sock);
2631
2632         return tp_len;
2633 }
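
/* Editor's note: tpacket_fill_skb() is the zero-copy heart of the TX
 * ring: the ring pages holding the payload are attached to the skb as
 * page frags (with get_page() references), and skb_zcopy_set_nouarg()
 * stashes the frame pointer so tpacket_destruct_skb() can flip the slot
 * status once the device is done with the data.
 */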
2634
2635 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2636                                 int size_max, void **data)
2637 {
2638         union tpacket_uhdr ph;
2639         int tp_len, off;
2640
2641         ph.raw = frame;
2642
2643         switch (po->tp_version) {
2644         case TPACKET_V3:
2645                 if (ph.h3->tp_next_offset != 0) {
2646                         pr_warn_once("variable-sized slot not supported");
2647                         return -EINVAL;
2648                 }
2649                 tp_len = ph.h3->tp_len;
2650                 break;
2651         case TPACKET_V2:
2652                 tp_len = ph.h2->tp_len;
2653                 break;
2654         default:
2655                 tp_len = ph.h1->tp_len;
2656                 break;
2657         }
2658         if (unlikely(tp_len > size_max)) {
2659                 pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2660                 return -EMSGSIZE;
2661         }
2662
2663         if (unlikely(po->tp_tx_has_off)) {
2664                 int off_min, off_max;
2665
2666                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2667                 off_max = po->tx_ring.frame_size - tp_len;
2668                 if (po->sk.sk_type == SOCK_DGRAM) {
2669                         switch (po->tp_version) {
2670                         case TPACKET_V3:
2671                                 off = ph.h3->tp_net;
2672                                 break;
2673                         case TPACKET_V2:
2674                                 off = ph.h2->tp_net;
2675                                 break;
2676                         default:
2677                                 off = ph.h1->tp_net;
2678                                 break;
2679                         }
2680                 } else {
2681                         switch (po->tp_version) {
2682                         case TPACKET_V3:
2683                                 off = ph.h3->tp_mac;
2684                                 break;
2685                         case TPACKET_V2:
2686                                 off = ph.h2->tp_mac;
2687                                 break;
2688                         default:
2689                                 off = ph.h1->tp_mac;
2690                                 break;
2691                         }
2692                 }
2693                 if (unlikely((off < off_min) || (off_max < off)))
2694                         return -EINVAL;
2695         } else {
2696                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2697         }
2698
2699         *data = frame + off;
2700         return tp_len;
2701 }
2702
2703 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2704 {
2705         struct sk_buff *skb = NULL;
2706         struct net_device *dev;
2707         struct virtio_net_hdr *vnet_hdr = NULL;
2708         struct sockcm_cookie sockc;
2709         __be16 proto;
2710         int err, reserve = 0;
2711         void *ph;
2712         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2713         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2714         unsigned char *addr = NULL;
2715         int tp_len, size_max;
2716         void *data;
2717         int len_sum = 0;
2718         int status = TP_STATUS_AVAILABLE;
2719         int hlen, tlen, copylen = 0;
2720         long timeo = 0;
2721
2722         mutex_lock(&po->pg_vec_lock);
2723
2724         /* packet_sendmsg()'s check on tx_ring.pg_vec was lockless,
2725          * so we must re-check it under the protection of pg_vec_lock.
2726          */
2727         if (unlikely(!po->tx_ring.pg_vec)) {
2728                 err = -EBUSY;
2729                 goto out;
2730         }
2731         if (likely(saddr == NULL)) {
2732                 dev     = packet_cached_dev_get(po);
2733                 proto   = READ_ONCE(po->num);
2734         } else {
2735                 err = -EINVAL;
2736                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2737                         goto out;
2738                 if (msg->msg_namelen < (saddr->sll_halen
2739                                         + offsetof(struct sockaddr_ll,
2740                                                 sll_addr)))
2741                         goto out;
2742                 proto   = saddr->sll_protocol;
2743                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2744                 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2745                         if (dev && msg->msg_namelen < dev->addr_len +
2746                                    offsetof(struct sockaddr_ll, sll_addr))
2747                                 goto out_put;
2748                         addr = saddr->sll_addr;
2749                 }
2750         }
2751
2752         err = -ENXIO;
2753         if (unlikely(dev == NULL))
2754                 goto out;
2755         err = -ENETDOWN;
2756         if (unlikely(!(dev->flags & IFF_UP)))
2757                 goto out_put;
2758
2759         sockcm_init(&sockc, &po->sk);
2760         if (msg->msg_controllen) {
2761                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2762                 if (unlikely(err))
2763                         goto out_put;
2764         }
2765
2766         if (po->sk.sk_socket->type == SOCK_RAW)
2767                 reserve = dev->hard_header_len;
2768         size_max = po->tx_ring.frame_size
2769                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2770
2771         if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2772                 size_max = dev->mtu + reserve + VLAN_HLEN;
2773
2774         reinit_completion(&po->skb_completion);
2775
2776         do {
2777                 ph = packet_current_frame(po, &po->tx_ring,
2778                                           TP_STATUS_SEND_REQUEST);
2779                 if (unlikely(ph == NULL)) {
2780                         if (need_wait && skb) {
2781                                 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2782                                 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2783                                 if (timeo <= 0) {
2784                                         err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2785                                         goto out_put;
2786                                 }
2787                         }
2788                         /* check for additional frames */
2789                         continue;
2790                 }
2791
2792                 skb = NULL;
2793                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2794                 if (tp_len < 0)
2795                         goto tpacket_error;
2796
2797                 status = TP_STATUS_SEND_REQUEST;
2798                 hlen = LL_RESERVED_SPACE(dev);
2799                 tlen = dev->needed_tailroom;
2800                 if (po->has_vnet_hdr) {
2801                         vnet_hdr = data;
2802                         data += sizeof(*vnet_hdr);
2803                         tp_len -= sizeof(*vnet_hdr);
2804                         if (tp_len < 0 ||
2805                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2806                                 tp_len = -EINVAL;
2807                                 goto tpacket_error;
2808                         }
2809                         copylen = __virtio16_to_cpu(vio_le(),
2810                                                     vnet_hdr->hdr_len);
2811                 }
2812                 copylen = max_t(int, copylen, dev->hard_header_len);
2813                 skb = sock_alloc_send_skb(&po->sk,
2814                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2815                                 (copylen - dev->hard_header_len),
2816                                 !need_wait, &err);
2817
2818                 if (unlikely(skb == NULL)) {
2819                         /* we assume the socket was initially writeable ... */
2820                         if (likely(len_sum > 0))
2821                                 err = len_sum;
2822                         goto out_status;
2823                 }
2824                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2825                                           addr, hlen, copylen, &sockc);
2826                 if (likely(tp_len >= 0) &&
2827                     tp_len > dev->mtu + reserve &&
2828                     !po->has_vnet_hdr &&
2829                     !packet_extra_vlan_len_allowed(dev, skb))
2830                         tp_len = -EMSGSIZE;
2831
2832                 if (unlikely(tp_len < 0)) {
2833 tpacket_error:
2834                         if (po->tp_loss) {
2835                                 __packet_set_status(po, ph,
2836                                                 TP_STATUS_AVAILABLE);
2837                                 packet_increment_head(&po->tx_ring);
2838                                 kfree_skb(skb);
2839                                 continue;
2840                         } else {
2841                                 status = TP_STATUS_WRONG_FORMAT;
2842                                 err = tp_len;
2843                                 goto out_status;
2844                         }
2845                 }
2846
2847                 if (po->has_vnet_hdr) {
2848                         if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2849                                 tp_len = -EINVAL;
2850                                 goto tpacket_error;
2851                         }
2852                         virtio_net_hdr_set_proto(skb, vnet_hdr);
2853                 }
2854
2855                 skb->destructor = tpacket_destruct_skb;
2856                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2857                 packet_inc_pending(&po->tx_ring);
2858
2859                 status = TP_STATUS_SEND_REQUEST;
2860                 err = po->xmit(skb);
2861                 if (unlikely(err != 0)) {
2862                         if (err > 0)
2863                                 err = net_xmit_errno(err);
2864                         if (err && __packet_get_status(po, ph) ==
2865                                    TP_STATUS_AVAILABLE) {
2866                                 /* skb was destructed already */
2867                                 skb = NULL;
2868                                 goto out_status;
2869                         }
2870                         /*
2871                          * skb was dropped but not destructed yet;
2872                          * let's treat it like congestion or err < 0
2873                          */
2874                         err = 0;
2875                 }
2876                 packet_increment_head(&po->tx_ring);
2877                 len_sum += tp_len;
2878         } while (likely((ph != NULL) ||
2879                 /* Note: packet_read_pending() might be slow if we have
2880                  * to call it, as it's a per-CPU variable, but on the
2881                  * fast path we already short-circuit the loop with the
2882                  * first condition and luckily don't have to go down
2883                  * that path anyway.
2884                  */
2885                  (need_wait && packet_read_pending(&po->tx_ring))));
2886
2887         err = len_sum;
2888         goto out_put;
2889
2890 out_status:
2891         __packet_set_status(po, ph, status);
2892         kfree_skb(skb);
2893 out_put:
2894         dev_put(dev);
2895 out:
2896         mutex_unlock(&po->pg_vec_lock);
2897         return err;
2898 }
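
/*
 * Illustrative userspace sketch (editor's addition): driving the TX path
 * above with a mapped PACKET_TX_RING. Fill a slot, mark it
 * TP_STATUS_SEND_REQUEST and kick the kernel with send(); the slot
 * returns to TP_STATUS_AVAILABLE via tpacket_destruct_skb(). "ring",
 * "i", "frame_size", "off" (derived from tp_hdrlen, see
 * tpacket_parse_header()), "frame" and "frame_len" are placeholders.
 *
 *	struct tpacket2_hdr *hdr = ring + i * frame_size;
 *
 *	memcpy((char *)hdr + off, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 */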
2899
2900 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2901                                         size_t reserve, size_t len,
2902                                         size_t linear, int noblock,
2903                                         int *err)
2904 {
2905         struct sk_buff *skb;
2906
2907         /* Under a page?  Don't bother with paged skb. */
2908         if (prepad + len < PAGE_SIZE || !linear)
2909                 linear = len;
2910
2911         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2912                                    err, 0);
2913         if (!skb)
2914                 return NULL;
2915
2916         skb_reserve(skb, reserve);
2917         skb_put(skb, linear);
2918         skb->data_len = len - linear;
2919         skb->len += len - linear;
2920
2921         return skb;
2922 }
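
/* Editor's note: if the whole packet (plus prepad) fits in a page, or no
 * linear size was requested, everything lands in the linear area;
 * otherwise only "linear" bytes are linear and the remainder is placed
 * in paged frags by sock_alloc_send_pskb().
 */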
2923
2924 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2925 {
2926         struct sock *sk = sock->sk;
2927         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2928         struct sk_buff *skb;
2929         struct net_device *dev;
2930         __be16 proto;
2931         unsigned char *addr = NULL;
2932         int err, reserve = 0;
2933         struct sockcm_cookie sockc;
2934         struct virtio_net_hdr vnet_hdr = { 0 };
2935         int offset = 0;
2936         struct packet_sock *po = pkt_sk(sk);
2937         bool has_vnet_hdr = false;
2938         int hlen, tlen, linear;
2939         int extra_len = 0;
2940
2941         /*
2942          *      Get and verify the address.
2943          */
2944
2945         if (likely(saddr == NULL)) {
2946                 dev     = packet_cached_dev_get(po);
2947                 proto   = READ_ONCE(po->num);
2948         } else {
2949                 err = -EINVAL;
2950                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2951                         goto out;
2952                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2953                         goto out;
2954                 proto   = saddr->sll_protocol;
2955                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2956                 if (sock->type == SOCK_DGRAM) {
2957                         if (dev && msg->msg_namelen < dev->addr_len +
2958                                    offsetof(struct sockaddr_ll, sll_addr))
2959                                 goto out_unlock;
2960                         addr = saddr->sll_addr;
2961                 }
2962         }
2963
2964         err = -ENXIO;
2965         if (unlikely(dev == NULL))
2966                 goto out_unlock;
2967         err = -ENETDOWN;
2968         if (unlikely(!(dev->flags & IFF_UP)))
2969                 goto out_unlock;
2970
2971         sockcm_init(&sockc, sk);
2972         sockc.mark = sk->sk_mark;
2973         if (msg->msg_controllen) {
2974                 err = sock_cmsg_send(sk, msg, &sockc);
2975                 if (unlikely(err))
2976                         goto out_unlock;
2977         }
2978
2979         if (sock->type == SOCK_RAW)
2980                 reserve = dev->hard_header_len;
2981         if (po->has_vnet_hdr) {
2982                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2983                 if (err)
2984                         goto out_unlock;
2985                 has_vnet_hdr = true;
2986         }
2987
2988         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2989                 if (!netif_supports_nofcs(dev)) {
2990                         err = -EPROTONOSUPPORT;
2991                         goto out_unlock;
2992                 }
2993                 extra_len = 4; /* We're doing our own CRC */
2994         }
2995
2996         err = -EMSGSIZE;
2997         if (!vnet_hdr.gso_type &&
2998             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2999                 goto out_unlock;
3000
3001         err = -ENOBUFS;
3002         hlen = LL_RESERVED_SPACE(dev);
3003         tlen = dev->needed_tailroom;
3004         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3005         linear = max(linear, min_t(int, len, dev->hard_header_len));
3006         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3007                                msg->msg_flags & MSG_DONTWAIT, &err);
3008         if (skb == NULL)
3009                 goto out_unlock;
3010
3011         skb_reset_network_header(skb);
3012
3013         err = -EINVAL;
3014         if (sock->type == SOCK_DGRAM) {
3015                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3016                 if (unlikely(offset < 0))
3017                         goto out_free;
3018         } else if (reserve) {
3019                 skb_reserve(skb, -reserve);
3020                 if (len < reserve + sizeof(struct ipv6hdr) &&
3021                     dev->min_header_len != dev->hard_header_len)
3022                         skb_reset_network_header(skb);
3023         }
3024
3025         /* Returns -EFAULT on error */
3026         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3027         if (err)
3028                 goto out_free;
3029
3030         if (sock->type == SOCK_RAW &&
3031             !dev_validate_header(dev, skb->data, len)) {
3032                 err = -EINVAL;
3033                 goto out_free;
3034         }
3035
3036         skb_setup_tx_timestamp(skb, sockc.tsflags);
3037
3038         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3039             !packet_extra_vlan_len_allowed(dev, skb)) {
3040                 err = -EMSGSIZE;
3041                 goto out_free;
3042         }
3043
3044         skb->protocol = proto;
3045         skb->dev = dev;
3046         skb->priority = sk->sk_priority;
3047         skb->mark = sockc.mark;
3048         skb->tstamp = sockc.transmit_time;
3049
3050         if (has_vnet_hdr) {
3051                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3052                 if (err)
3053                         goto out_free;
3054                 len += sizeof(vnet_hdr);
3055                 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3056         }
3057
3058         packet_parse_headers(skb, sock);
3059
3060         if (unlikely(extra_len == 4))
3061                 skb->no_fcs = 1;
3062
3063         err = po->xmit(skb);
3064         if (unlikely(err != 0)) {
3065                 if (err > 0)
3066                         err = net_xmit_errno(err);
3067                 if (err)
3068                         goto out_unlock;
3069         }
3070
3071         dev_put(dev);
3072
3073         return len;
3074
3075 out_free:
3076         kfree_skb(skb);
3077 out_unlock:
3078         dev_put(dev);
3079 out:
3080         return err;
3081 }
3082
3083 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3084 {
3085         struct sock *sk = sock->sk;
3086         struct packet_sock *po = pkt_sk(sk);
3087
3088         /* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3089          * tpacket_snd() will redo the check safely.
3090          */
3091         if (data_race(po->tx_ring.pg_vec))
3092                 return tpacket_snd(po, msg);
3093
3094         return packet_snd(sock, msg, len);
3095 }
3096
3097 /*
3098  *      Close a PACKET socket. This is fairly simple. We immediately go
3099  *      to 'closed' state and remove our protocol entry from the device list.
3100  */
3101
3102 static int packet_release(struct socket *sock)
3103 {
3104         struct sock *sk = sock->sk;
3105         struct packet_sock *po;
3106         struct packet_fanout *f;
3107         struct net *net;
3108         union tpacket_req_u req_u;
3109
3110         if (!sk)
3111                 return 0;
3112
3113         net = sock_net(sk);
3114         po = pkt_sk(sk);
3115
3116         mutex_lock(&net->packet.sklist_lock);
3117         sk_del_node_init_rcu(sk);
3118         mutex_unlock(&net->packet.sklist_lock);
3119
3120         sock_prot_inuse_add(net, sk->sk_prot, -1);
3121
3122         spin_lock(&po->bind_lock);
3123         unregister_prot_hook(sk, false);
3124         packet_cached_dev_reset(po);
3125
3126         if (po->prot_hook.dev) {
3127                 dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3128                 po->prot_hook.dev = NULL;
3129         }
3130         spin_unlock(&po->bind_lock);
3131
3132         packet_flush_mclist(sk);
3133
3134         lock_sock(sk);
3135         if (po->rx_ring.pg_vec) {
3136                 memset(&req_u, 0, sizeof(req_u));
3137                 packet_set_ring(sk, &req_u, 1, 0);
3138         }
3139
3140         if (po->tx_ring.pg_vec) {
3141                 memset(&req_u, 0, sizeof(req_u));
3142                 packet_set_ring(sk, &req_u, 1, 1);
3143         }
3144         release_sock(sk);
3145
3146         f = fanout_release(sk);
3147
3148         synchronize_net();
3149
3150         kfree(po->rollover);
3151         if (f) {
3152                 fanout_release_data(f);
3153                 kvfree(f);
3154         }
3155         /*
3156          *      Now the socket is dead. No more input will appear.
3157          */
3158         sock_orphan(sk);
3159         sock->sk = NULL;
3160
3161         /* Purge queues */
3162
3163         skb_queue_purge(&sk->sk_receive_queue);
3164         packet_free_pending(po);
3165         sk_refcnt_debug_release(sk);
3166
3167         sock_put(sk);
3168         return 0;
3169 }
3170
3171 /*
3172  *      Attach a packet hook.
3173  */
3174
3175 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3176                           __be16 proto)
3177 {
3178         struct packet_sock *po = pkt_sk(sk);
3179         struct net_device *dev = NULL;
3180         bool unlisted = false;
3181         bool need_rehook;
3182         int ret = 0;
3183
3184         lock_sock(sk);
3185         spin_lock(&po->bind_lock);
3186         rcu_read_lock();
3187
3188         if (po->fanout) {
3189                 ret = -EINVAL;
3190                 goto out_unlock;
3191         }
3192
3193         if (name) {
3194                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3195                 if (!dev) {
3196                         ret = -ENODEV;
3197                         goto out_unlock;
3198                 }
3199         } else if (ifindex) {
3200                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3201                 if (!dev) {
3202                         ret = -ENODEV;
3203                         goto out_unlock;
3204                 }
3205         }
3206
3207         need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3208
3209         if (need_rehook) {
3210                 dev_hold(dev);
3211                 if (po->running) {
3212                         rcu_read_unlock();
3213                         /* prevents packet_notifier() from calling
3214                          * register_prot_hook()
3215                          */
3216                         WRITE_ONCE(po->num, 0);
3217                         __unregister_prot_hook(sk, true);
3218                         rcu_read_lock();
3219                         if (dev)
3220                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3221                                                                  dev->ifindex);
3222                 }
3223
3224                 BUG_ON(po->running);
3225                 WRITE_ONCE(po->num, proto);
3226                 po->prot_hook.type = proto;
3227
3228                 dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3229
3230                 if (unlikely(unlisted)) {
3231                         po->prot_hook.dev = NULL;
3232                         WRITE_ONCE(po->ifindex, -1);
3233                         packet_cached_dev_reset(po);
3234                 } else {
3235                         dev_hold_track(dev, &po->prot_hook.dev_tracker,
3236                                        GFP_ATOMIC);
3237                         po->prot_hook.dev = dev;
3238                         WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3239                         packet_cached_dev_assign(po, dev);
3240                 }
3241                 dev_put(dev);
3242         }
3243
3244         if (proto == 0 || !need_rehook)
3245                 goto out_unlock;
3246
3247         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3248                 register_prot_hook(sk);
3249         } else {
3250                 sk->sk_err = ENETDOWN;
3251                 if (!sock_flag(sk, SOCK_DEAD))
3252                         sk_error_report(sk);
3253         }
3254
3255 out_unlock:
3256         rcu_read_unlock();
3257         spin_unlock(&po->bind_lock);
3258         release_sock(sk);
3259         return ret;
3260 }
3261
3262 /*
3263  *      Bind a packet socket to a device
3264  */
3265
3266 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3267                             int addr_len)
3268 {
3269         struct sock *sk = sock->sk;
3270         char name[sizeof(uaddr->sa_data) + 1];
3271
3272         /*
3273          *      Check legality
3274          */
3275
3276         if (addr_len != sizeof(struct sockaddr))
3277                 return -EINVAL;
3278         /* uaddr->sa_data comes from userspace, it's not guaranteed to be
3279          * zero-terminated.
3280          */
3281         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3282         name[sizeof(uaddr->sa_data)] = 0;
3283
3284         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3285 }
3286
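/* A hedged userspace sketch of binding an AF_PACKET socket to one interface
 * ("eth0" is only an example name):
 *
 *      struct sockaddr_ll sll = {
 *              .sll_family   = AF_PACKET,
 *              .sll_protocol = htons(ETH_P_ALL),
 *              .sll_ifindex  = if_nametoindex("eth0"),
 *      };
 *      bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_protocol falls back to the socket's current protocol, matching
 * the "sll->sll_protocol ? :" expression below.
 */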
3287 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3288 {
3289         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3290         struct sock *sk = sock->sk;
3291
3292         /*
3293          *      Check legality
3294          */
3295
3296         if (addr_len < sizeof(struct sockaddr_ll))
3297                 return -EINVAL;
3298         if (sll->sll_family != AF_PACKET)
3299                 return -EINVAL;
3300
3301         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3302                               sll->sll_protocol ? : pkt_sk(sk)->num);
3303 }
3304
3305 static struct proto packet_proto = {
3306         .name     = "PACKET",
3307         .owner    = THIS_MODULE,
3308         .obj_size = sizeof(struct packet_sock),
3309 };
3310
3311 /*
3312  *      Create a packet socket.
3313  */
3314
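/* From userspace this corresponds to, e.g. (sketch, requires CAP_NET_RAW):
 *
 *      int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 * SOCK_DGRAM yields cooked packets with the link-layer header removed, and
 * the legacy SOCK_PACKET type is still accepted for compatibility.
 */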
3315 static int packet_create(struct net *net, struct socket *sock, int protocol,
3316                          int kern)
3317 {
3318         struct sock *sk;
3319         struct packet_sock *po;
3320         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3321         int err;
3322
3323         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3324                 return -EPERM;
3325         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3326             sock->type != SOCK_PACKET)
3327                 return -ESOCKTNOSUPPORT;
3328
3329         sock->state = SS_UNCONNECTED;
3330
3331         err = -ENOBUFS;
3332         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3333         if (sk == NULL)
3334                 goto out;
3335
3336         sock->ops = &packet_ops;
3337         if (sock->type == SOCK_PACKET)
3338                 sock->ops = &packet_ops_spkt;
3339
3340         sock_init_data(sock, sk);
3341
3342         po = pkt_sk(sk);
3343         init_completion(&po->skb_completion);
3344         sk->sk_family = PF_PACKET;
3345         po->num = proto;
3346         po->xmit = dev_queue_xmit;
3347
3348         err = packet_alloc_pending(po);
3349         if (err)
3350                 goto out2;
3351
3352         packet_cached_dev_reset(po);
3353
3354         sk->sk_destruct = packet_sock_destruct;
3355         sk_refcnt_debug_inc(sk);
3356
3357         /*
3358          *      Attach a protocol block
3359          */
3360
3361         spin_lock_init(&po->bind_lock);
3362         mutex_init(&po->pg_vec_lock);
3363         po->rollover = NULL;
3364         po->prot_hook.func = packet_rcv;
3365
3366         if (sock->type == SOCK_PACKET)
3367                 po->prot_hook.func = packet_rcv_spkt;
3368
3369         po->prot_hook.af_packet_priv = sk;
3370         po->prot_hook.af_packet_net = sock_net(sk);
3371
3372         if (proto) {
3373                 po->prot_hook.type = proto;
3374                 __register_prot_hook(sk);
3375         }
3376
3377         mutex_lock(&net->packet.sklist_lock);
3378         sk_add_node_tail_rcu(sk, &net->packet.sklist);
3379         mutex_unlock(&net->packet.sklist_lock);
3380
3381         sock_prot_inuse_add(net, &packet_proto, 1);
3382
3383         return 0;
3384 out2:
3385         sk_free(sk);
3386 out:
3387         return err;
3388 }
3389
3390 /*
3391  *      Pull a packet from our receive queue and hand it to the user.
3392  *      If necessary we block.
3393  */
3394
3395 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3396                           int flags)
3397 {
3398         struct sock *sk = sock->sk;
3399         struct sk_buff *skb;
3400         int copied, err;
3401         int vnet_hdr_len = 0;
3402         unsigned int origlen = 0;
3403
3404         err = -EINVAL;
3405         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3406                 goto out;
3407
3408 #if 0
3409         /* What error should we return now? EUNATTACH? */
3410         if (pkt_sk(sk)->ifindex < 0)
3411                 return -ENODEV;
3412 #endif
3413
3414         if (flags & MSG_ERRQUEUE) {
3415                 err = sock_recv_errqueue(sk, msg, len,
3416                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3417                 goto out;
3418         }
3419
3420         /*
3421          *      Call the generic datagram receiver. This handles all sorts
3422          *      of horrible races and re-entrancy so we can forget about them
3423          *      in the protocol layers.
3424          *
3425          *      Now it will return ENETDOWN if the device has just gone down,
3426          *      but then it will block.
3427          */
3428
3429         skb = skb_recv_datagram(sk, flags, &err);
3430
3431         /*
3432          *      An error occurred so return it. Because skb_recv_datagram()
3433          *      handles the blocking we don't need to see or worry about blocking
3434          *      retries.
3435          */
3436
3437         if (skb == NULL)
3438                 goto out;
3439
3440         packet_rcv_try_clear_pressure(pkt_sk(sk));
3441
3442         if (pkt_sk(sk)->has_vnet_hdr) {
3443                 err = packet_rcv_vnet(msg, skb, &len);
3444                 if (err)
3445                         goto out_free;
3446                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3447         }
3448
3449         /* You lose any data beyond the buffer you gave. If it worries
3450          * a user program it can ask the device for its MTU
3451          * anyway.
3452          */
3453         copied = skb->len;
3454         if (copied > len) {
3455                 copied = len;
3456                 msg->msg_flags |= MSG_TRUNC;
3457         }
3458
3459         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3460         if (err)
3461                 goto out_free;
3462
3463         if (sock->type != SOCK_PACKET) {
3464                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3465
3466                 /* Original length was stored in sockaddr_ll fields */
3467                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3468                 sll->sll_family = AF_PACKET;
3469                 sll->sll_protocol = skb->protocol;
3470         }
3471
3472         sock_recv_ts_and_drops(msg, sk, skb);
3473
3474         if (msg->msg_name) {
3475                 const size_t max_len = min(sizeof(skb->cb),
3476                                            sizeof(struct sockaddr_storage));
3477                 int copy_len;
3478
3479                 /* If the address length field is there to be filled
3480                  * in, we fill it in now.
3481                  */
3482                 if (sock->type == SOCK_PACKET) {
3483                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3484                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3485                         copy_len = msg->msg_namelen;
3486                 } else {
3487                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3488
3489                         msg->msg_namelen = sll->sll_halen +
3490                                 offsetof(struct sockaddr_ll, sll_addr);
3491                         copy_len = msg->msg_namelen;
3492                         if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3493                                 memset(msg->msg_name +
3494                                        offsetof(struct sockaddr_ll, sll_addr),
3495                                        0, sizeof(sll->sll_addr));
3496                                 msg->msg_namelen = sizeof(struct sockaddr_ll);
3497                         }
3498                 }
3499                 if (WARN_ON_ONCE(copy_len > max_len)) {
3500                         copy_len = max_len;
3501                         msg->msg_namelen = copy_len;
3502                 }
3503                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3504         }
3505
3506         if (pkt_sk(sk)->auxdata) {
3507                 struct tpacket_auxdata aux;
3508
3509                 aux.tp_status = TP_STATUS_USER;
3510                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3511                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3512                 else if (skb->pkt_type != PACKET_OUTGOING &&
3513                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3514                           skb_csum_unnecessary(skb)))
3515                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3516
3517                 aux.tp_len = origlen;
3518                 aux.tp_snaplen = skb->len;
3519                 aux.tp_mac = 0;
3520                 aux.tp_net = skb_network_offset(skb);
3521                 if (skb_vlan_tag_present(skb)) {
3522                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3523                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3524                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3525                 } else {
3526                         aux.tp_vlan_tci = 0;
3527                         aux.tp_vlan_tpid = 0;
3528                 }
3529                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3530         }
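        /* Userspace picks the auxdata up as a control message; a hedged
         * receive-side sketch:
         *
         *      recvmsg(fd, &msg, 0);
         *      for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
         *           c = CMSG_NXTHDR(&msg, c))
         *              if (c->cmsg_level == SOL_PACKET &&
         *                  c->cmsg_type == PACKET_AUXDATA)
         *                      aux = (struct tpacket_auxdata *)CMSG_DATA(c);
         */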
3531
3532         /*
3533          *      Free or return the buffer as appropriate. Again this
3534          *      hides all the races and re-entrancy issues from us.
3535          */
3536         err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3537
3538 out_free:
3539         skb_free_datagram(sk, skb);
3540 out:
3541         return err;
3542 }
3543
3544 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3545                                int peer)
3546 {
3547         struct net_device *dev;
3548         struct sock *sk = sock->sk;
3549
3550         if (peer)
3551                 return -EOPNOTSUPP;
3552
3553         uaddr->sa_family = AF_PACKET;
3554         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3555         rcu_read_lock();
3556         dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3557         if (dev)
3558                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3559         rcu_read_unlock();
3560
3561         return sizeof(*uaddr);
3562 }
3563
3564 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3565                           int peer)
3566 {
3567         struct net_device *dev;
3568         struct sock *sk = sock->sk;
3569         struct packet_sock *po = pkt_sk(sk);
3570         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3571         int ifindex;
3572
3573         if (peer)
3574                 return -EOPNOTSUPP;
3575
3576         ifindex = READ_ONCE(po->ifindex);
3577         sll->sll_family = AF_PACKET;
3578         sll->sll_ifindex = ifindex;
3579         sll->sll_protocol = READ_ONCE(po->num);
3580         sll->sll_pkttype = 0;
3581         rcu_read_lock();
3582         dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3583         if (dev) {
3584                 sll->sll_hatype = dev->type;
3585                 sll->sll_halen = dev->addr_len;
3586                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3587         } else {
3588                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3589                 sll->sll_halen = 0;
3590         }
3591         rcu_read_unlock();
3592
3593         return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3594 }
3595
3596 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3597                          int what)
3598 {
3599         switch (i->type) {
3600         case PACKET_MR_MULTICAST:
3601                 if (i->alen != dev->addr_len)
3602                         return -EINVAL;
3603                 if (what > 0)
3604                         return dev_mc_add(dev, i->addr);
3605                 else
3606                         return dev_mc_del(dev, i->addr);
3607                 break;
3608         case PACKET_MR_PROMISC:
3609                 return dev_set_promiscuity(dev, what);
3610         case PACKET_MR_ALLMULTI:
3611                 return dev_set_allmulti(dev, what);
3612         case PACKET_MR_UNICAST:
3613                 if (i->alen != dev->addr_len)
3614                         return -EINVAL;
3615                 if (what > 0)
3616                         return dev_uc_add(dev, i->addr);
3617                 else
3618                         return dev_uc_del(dev, i->addr);
3619                 break;
3620         default:
3621                 break;
3622         }
3623         return 0;
3624 }
3625
3626 static void packet_dev_mclist_delete(struct net_device *dev,
3627                                      struct packet_mclist **mlp)
3628 {
3629         struct packet_mclist *ml;
3630
3631         while ((ml = *mlp) != NULL) {
3632                 if (ml->ifindex == dev->ifindex) {
3633                         packet_dev_mc(dev, ml, -1);
3634                         *mlp = ml->next;
3635                         kfree(ml);
3636                 } else
3637                         mlp = &ml->next;
3638         }
3639 }
3640
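/* packet_mc_add() backs PACKET_ADD_MEMBERSHIP.  A common use is reference
 * counted promiscuous mode (hedged sketch; "ifindex" would come from
 * if_nametoindex()):
 *
 *      struct packet_mreq mreq = {
 *              .mr_ifindex = ifindex,
 *              .mr_type    = PACKET_MR_PROMISC,
 *      };
 *      setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *                 &mreq, sizeof(mreq));
 */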
3641 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3642 {
3643         struct packet_sock *po = pkt_sk(sk);
3644         struct packet_mclist *ml, *i;
3645         struct net_device *dev;
3646         int err;
3647
3648         rtnl_lock();
3649
3650         err = -ENODEV;
3651         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3652         if (!dev)
3653                 goto done;
3654
3655         err = -EINVAL;
3656         if (mreq->mr_alen > dev->addr_len)
3657                 goto done;
3658
3659         err = -ENOBUFS;
3660         i = kmalloc(sizeof(*i), GFP_KERNEL);
3661         if (i == NULL)
3662                 goto done;
3663
3664         err = 0;
3665         for (ml = po->mclist; ml; ml = ml->next) {
3666                 if (ml->ifindex == mreq->mr_ifindex &&
3667                     ml->type == mreq->mr_type &&
3668                     ml->alen == mreq->mr_alen &&
3669                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3670                         ml->count++;
3671                         /* Free the new element ... */
3672                         kfree(i);
3673                         goto done;
3674                 }
3675         }
3676
3677         i->type = mreq->mr_type;
3678         i->ifindex = mreq->mr_ifindex;
3679         i->alen = mreq->mr_alen;
3680         memcpy(i->addr, mreq->mr_address, i->alen);
3681         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3682         i->count = 1;
3683         i->next = po->mclist;
3684         po->mclist = i;
3685         err = packet_dev_mc(dev, i, 1);
3686         if (err) {
3687                 po->mclist = i->next;
3688                 kfree(i);
3689         }
3690
3691 done:
3692         rtnl_unlock();
3693         return err;
3694 }
3695
3696 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3697 {
3698         struct packet_mclist *ml, **mlp;
3699
3700         rtnl_lock();
3701
3702         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3703                 if (ml->ifindex == mreq->mr_ifindex &&
3704                     ml->type == mreq->mr_type &&
3705                     ml->alen == mreq->mr_alen &&
3706                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3707                         if (--ml->count == 0) {
3708                                 struct net_device *dev;
3709                                 *mlp = ml->next;
3710                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3711                                 if (dev)
3712                                         packet_dev_mc(dev, ml, -1);
3713                                 kfree(ml);
3714                         }
3715                         break;
3716                 }
3717         }
3718         rtnl_unlock();
3719         return 0;
3720 }
3721
3722 static void packet_flush_mclist(struct sock *sk)
3723 {
3724         struct packet_sock *po = pkt_sk(sk);
3725         struct packet_mclist *ml;
3726
3727         if (!po->mclist)
3728                 return;
3729
3730         rtnl_lock();
3731         while ((ml = po->mclist) != NULL) {
3732                 struct net_device *dev;
3733
3734                 po->mclist = ml->next;
3735                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3736                 if (dev != NULL)
3737                         packet_dev_mc(dev, ml, -1);
3738                 kfree(ml);
3739         }
3740         rtnl_unlock();
3741 }
3742
3743 static int
3744 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3745                   unsigned int optlen)
3746 {
3747         struct sock *sk = sock->sk;
3748         struct packet_sock *po = pkt_sk(sk);
3749         int ret;
3750
3751         if (level != SOL_PACKET)
3752                 return -ENOPROTOOPT;
3753
3754         switch (optname) {
3755         case PACKET_ADD_MEMBERSHIP:
3756         case PACKET_DROP_MEMBERSHIP:
3757         {
3758                 struct packet_mreq_max mreq;
3759                 int len = optlen;
3760                 memset(&mreq, 0, sizeof(mreq));
3761                 if (len < sizeof(struct packet_mreq))
3762                         return -EINVAL;
3763                 if (len > sizeof(mreq))
3764                         len = sizeof(mreq);
3765                 if (copy_from_sockptr(&mreq, optval, len))
3766                         return -EFAULT;
3767                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3768                         return -EINVAL;
3769                 if (optname == PACKET_ADD_MEMBERSHIP)
3770                         ret = packet_mc_add(sk, &mreq);
3771                 else
3772                         ret = packet_mc_drop(sk, &mreq);
3773                 return ret;
3774         }
3775
3776         case PACKET_RX_RING:
3777         case PACKET_TX_RING:
3778         {
3779                 union tpacket_req_u req_u;
3780                 int len;
3781
3782                 lock_sock(sk);
3783                 switch (po->tp_version) {
3784                 case TPACKET_V1:
3785                 case TPACKET_V2:
3786                         len = sizeof(req_u.req);
3787                         break;
3788                 case TPACKET_V3:
3789                 default:
3790                         len = sizeof(req_u.req3);
3791                         break;
3792                 }
3793                 if (optlen < len) {
3794                         ret = -EINVAL;
3795                 } else {
3796                         if (copy_from_sockptr(&req_u.req, optval, len))
3797                                 ret = -EFAULT;
3798                         else
3799                                 ret = packet_set_ring(sk, &req_u, 0,
3800                                                     optname == PACKET_TX_RING);
3801                 }
3802                 release_sock(sk);
3803                 return ret;
3804         }
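        /* A hedged TPACKET_V3 RX ring setup from userspace (sizes are
         * illustrative; PACKET_VERSION must be set before the ring is
         * requested, since the version decides how much of req_u is read
         * above):
         *
         *      struct tpacket_req3 req = {
         *              .tp_block_size     = 1 << 22,
         *              .tp_block_nr       = 64,
         *              .tp_frame_size     = 1 << 11,
         *              .tp_frame_nr       = ((1 << 22) / (1 << 11)) * 64,
         *              .tp_retire_blk_tov = 60,
         *      };
         *      int ver = TPACKET_V3;
         *      setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
         *      setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
         */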
3805         case PACKET_COPY_THRESH:
3806         {
3807                 int val;
3808
3809                 if (optlen != sizeof(val))
3810                         return -EINVAL;
3811                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3812                         return -EFAULT;
3813
3814                 pkt_sk(sk)->copy_thresh = val;
3815                 return 0;
3816         }
3817         case PACKET_VERSION:
3818         {
3819                 int val;
3820
3821                 if (optlen != sizeof(val))
3822                         return -EINVAL;
3823                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3824                         return -EFAULT;
3825                 switch (val) {
3826                 case TPACKET_V1:
3827                 case TPACKET_V2:
3828                 case TPACKET_V3:
3829                         break;
3830                 default:
3831                         return -EINVAL;
3832                 }
3833                 lock_sock(sk);
3834                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3835                         ret = -EBUSY;
3836                 } else {
3837                         po->tp_version = val;
3838                         ret = 0;
3839                 }
3840                 release_sock(sk);
3841                 return ret;
3842         }
3843         case PACKET_RESERVE:
3844         {
3845                 unsigned int val;
3846
3847                 if (optlen != sizeof(val))
3848                         return -EINVAL;
3849                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3850                         return -EFAULT;
3851                 if (val > INT_MAX)
3852                         return -EINVAL;
3853                 lock_sock(sk);
3854                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3855                         ret = -EBUSY;
3856                 } else {
3857                         po->tp_reserve = val;
3858                         ret = 0;
3859                 }
3860                 release_sock(sk);
3861                 return ret;
3862         }
3863         case PACKET_LOSS:
3864         {
3865                 unsigned int val;
3866
3867                 if (optlen != sizeof(val))
3868                         return -EINVAL;
3869                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3870                         return -EFAULT;
3871
3872                 lock_sock(sk);
3873                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3874                         ret = -EBUSY;
3875                 } else {
3876                         po->tp_loss = !!val;
3877                         ret = 0;
3878                 }
3879                 release_sock(sk);
3880                 return ret;
3881         }
3882         case PACKET_AUXDATA:
3883         {
3884                 int val;
3885
3886                 if (optlen < sizeof(val))
3887                         return -EINVAL;
3888                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3889                         return -EFAULT;
3890
3891                 lock_sock(sk);
3892                 po->auxdata = !!val;
3893                 release_sock(sk);
3894                 return 0;
3895         }
3896         case PACKET_ORIGDEV:
3897         {
3898                 int val;
3899
3900                 if (optlen < sizeof(val))
3901                         return -EINVAL;
3902                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3903                         return -EFAULT;
3904
3905                 lock_sock(sk);
3906                 po->origdev = !!val;
3907                 release_sock(sk);
3908                 return 0;
3909         }
3910         case PACKET_VNET_HDR:
3911         {
3912                 int val;
3913
3914                 if (sock->type != SOCK_RAW)
3915                         return -EINVAL;
3916                 if (optlen < sizeof(val))
3917                         return -EINVAL;
3918                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3919                         return -EFAULT;
3920
3921                 lock_sock(sk);
3922                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3923                         ret = -EBUSY;
3924                 } else {
3925                         po->has_vnet_hdr = !!val;
3926                         ret = 0;
3927                 }
3928                 release_sock(sk);
3929                 return ret;
3930         }
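        /* Once enabled, every sendmsg()/recvmsg() payload is prefixed by a
         * struct virtio_net_hdr describing checksum/GSO state; a send-side
         * sketch with no offloads requested ("frame" is illustrative):
         *
         *      struct virtio_net_hdr vh = { 0 };
         *      struct iovec iov[2] = {
         *              { &vh, sizeof(vh) },
         *              { frame, frame_len },
         *      };
         */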
3931         case PACKET_TIMESTAMP:
3932         {
3933                 int val;
3934
3935                 if (optlen != sizeof(val))
3936                         return -EINVAL;
3937                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3938                         return -EFAULT;
3939
3940                 po->tp_tstamp = val;
3941                 return 0;
3942         }
3943         case PACKET_FANOUT:
3944         {
3945                 struct fanout_args args = { 0 };
3946
3947                 if (optlen != sizeof(int) && optlen != sizeof(args))
3948                         return -EINVAL;
3949                 if (copy_from_sockptr(&args, optval, optlen))
3950                         return -EFAULT;
3951
3952                 return fanout_add(sk, &args);
3953         }
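        /* Userspace joins a fanout group by packing the group id and mode
         * into a single int (hedged sketch; id 42 and the hash mode are
         * arbitrary examples):
         *
         *      int arg = 42 | (PACKET_FANOUT_HASH << 16);
         *      setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
         */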
3954         case PACKET_FANOUT_DATA:
3955         {
3956                 /* Paired with the WRITE_ONCE() in fanout_add() */
3957                 if (!READ_ONCE(po->fanout))
3958                         return -EINVAL;
3959
3960                 return fanout_set_data(po, optval, optlen);
3961         }
3962         case PACKET_IGNORE_OUTGOING:
3963         {
3964                 int val;
3965
3966                 if (optlen != sizeof(val))
3967                         return -EINVAL;
3968                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3969                         return -EFAULT;
3970                 if (val < 0 || val > 1)
3971                         return -EINVAL;
3972
3973                 po->prot_hook.ignore_outgoing = !!val;
3974                 return 0;
3975         }
3976         case PACKET_TX_HAS_OFF:
3977         {
3978                 unsigned int val;
3979
3980                 if (optlen != sizeof(val))
3981                         return -EINVAL;
3982                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3983                         return -EFAULT;
3984
3985                 lock_sock(sk);
3986                 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
3987                         po->tp_tx_has_off = !!val;
3988
3989                 release_sock(sk);
3990                 return 0;
3991         }
3992         case PACKET_QDISC_BYPASS:
3993         {
3994                 int val;
3995
3996                 if (optlen != sizeof(val))
3997                         return -EINVAL;
3998                 if (copy_from_sockptr(&val, optval, sizeof(val)))
3999                         return -EFAULT;
4000
4001                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
4002                 return 0;
4003         }
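        /* e.g. (sketch): int one = 1;
         * setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
         * Frames then skip the qdisc layer entirely, so there is no flow
         * control and drops on a busy device queue are silent.
         */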
4004         default:
4005                 return -ENOPROTOOPT;
4006         }
4007 }
4008
4009 static int packet_getsockopt(struct socket *sock, int level, int optname,
4010                              char __user *optval, int __user *optlen)
4011 {
4012         int len;
4013         int val, lv = sizeof(val);
4014         struct sock *sk = sock->sk;
4015         struct packet_sock *po = pkt_sk(sk);
4016         void *data = &val;
4017         union tpacket_stats_u st;
4018         struct tpacket_rollover_stats rstats;
4019         int drops;
4020
4021         if (level != SOL_PACKET)
4022                 return -ENOPROTOOPT;
4023
4024         if (get_user(len, optlen))
4025                 return -EFAULT;
4026
4027         if (len < 0)
4028                 return -EINVAL;
4029
4030         switch (optname) {
4031         case PACKET_STATISTICS:
4032                 spin_lock_bh(&sk->sk_receive_queue.lock);
4033                 memcpy(&st, &po->stats, sizeof(st));
4034                 memset(&po->stats, 0, sizeof(po->stats));
4035                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4036                 drops = atomic_xchg(&po->tp_drops, 0);
4037
4038                 if (po->tp_version == TPACKET_V3) {
4039                         lv = sizeof(struct tpacket_stats_v3);
4040                         st.stats3.tp_drops = drops;
4041                         st.stats3.tp_packets += drops;
4042                         data = &st.stats3;
4043                 } else {
4044                         lv = sizeof(struct tpacket_stats);
4045                         st.stats1.tp_drops = drops;
4046                         st.stats1.tp_packets += drops;
4047                         data = &st.stats1;
4048                 }
4049
4050                 break;
4051         case PACKET_AUXDATA:
4052                 val = po->auxdata;
4053                 break;
4054         case PACKET_ORIGDEV:
4055                 val = po->origdev;
4056                 break;
4057         case PACKET_VNET_HDR:
4058                 val = po->has_vnet_hdr;
4059                 break;
4060         case PACKET_VERSION:
4061                 val = po->tp_version;
4062                 break;
4063         case PACKET_HDRLEN:
4064                 if (len > sizeof(int))
4065                         len = sizeof(int);
4066                 if (len < sizeof(int))
4067                         return -EINVAL;
4068                 if (copy_from_user(&val, optval, len))
4069                         return -EFAULT;
4070                 switch (val) {
4071                 case TPACKET_V1:
4072                         val = sizeof(struct tpacket_hdr);
4073                         break;
4074                 case TPACKET_V2:
4075                         val = sizeof(struct tpacket2_hdr);
4076                         break;
4077                 case TPACKET_V3:
4078                         val = sizeof(struct tpacket3_hdr);
4079                         break;
4080                 default:
4081                         return -EINVAL;
4082                 }
4083                 break;
4084         case PACKET_RESERVE:
4085                 val = po->tp_reserve;
4086                 break;
4087         case PACKET_LOSS:
4088                 val = po->tp_loss;
4089                 break;
4090         case PACKET_TIMESTAMP:
4091                 val = po->tp_tstamp;
4092                 break;
4093         case PACKET_FANOUT:
4094                 val = (po->fanout ?
4095                        ((u32)po->fanout->id |
4096                         ((u32)po->fanout->type << 16) |
4097                         ((u32)po->fanout->flags << 24)) :
4098                        0);
4099                 break;
4100         case PACKET_IGNORE_OUTGOING:
4101                 val = po->prot_hook.ignore_outgoing;
4102                 break;
4103         case PACKET_ROLLOVER_STATS:
4104                 if (!po->rollover)
4105                         return -EINVAL;
4106                 rstats.tp_all = atomic_long_read(&po->rollover->num);
4107                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4108                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4109                 data = &rstats;
4110                 lv = sizeof(rstats);
4111                 break;
4112         case PACKET_TX_HAS_OFF:
4113                 val = po->tp_tx_has_off;
4114                 break;
4115         case PACKET_QDISC_BYPASS:
4116                 val = packet_use_direct_xmit(po);
4117                 break;
4118         default:
4119                 return -ENOPROTOOPT;
4120         }
4121
4122         if (len > lv)
4123                 len = lv;
4124         if (put_user(len, optlen))
4125                 return -EFAULT;
4126         if (copy_to_user(optval, data, len))
4127                 return -EFAULT;
4128         return 0;
4129 }
4130
4131 static int packet_notifier(struct notifier_block *this,
4132                            unsigned long msg, void *ptr)
4133 {
4134         struct sock *sk;
4135         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4136         struct net *net = dev_net(dev);
4137
4138         rcu_read_lock();
4139         sk_for_each_rcu(sk, &net->packet.sklist) {
4140                 struct packet_sock *po = pkt_sk(sk);
4141
4142                 switch (msg) {
4143                 case NETDEV_UNREGISTER:
4144                         if (po->mclist)
4145                                 packet_dev_mclist_delete(dev, &po->mclist);
4146                         fallthrough;
4147
4148                 case NETDEV_DOWN:
4149                         if (dev->ifindex == po->ifindex) {
4150                                 spin_lock(&po->bind_lock);
4151                                 if (po->running) {
4152                                         __unregister_prot_hook(sk, false);
4153                                         sk->sk_err = ENETDOWN;
4154                                         if (!sock_flag(sk, SOCK_DEAD))
4155                                                 sk_error_report(sk);
4156                                 }
4157                                 if (msg == NETDEV_UNREGISTER) {
4158                                         packet_cached_dev_reset(po);
4159                                         WRITE_ONCE(po->ifindex, -1);
4160                                         dev_put_track(po->prot_hook.dev,
4161                                                       &po->prot_hook.dev_tracker);
4162                                         po->prot_hook.dev = NULL;
4163                                 }
4164                                 spin_unlock(&po->bind_lock);
4165                         }
4166                         break;
4167                 case NETDEV_UP:
4168                         if (dev->ifindex == po->ifindex) {
4169                                 spin_lock(&po->bind_lock);
4170                                 if (po->num)
4171                                         register_prot_hook(sk);
4172                                 spin_unlock(&po->bind_lock);
4173                         }
4174                         break;
4175                 }
4176         }
4177         rcu_read_unlock();
4178         return NOTIFY_DONE;
4179 }
4180
4181
4182 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4183                         unsigned long arg)
4184 {
4185         struct sock *sk = sock->sk;
4186
4187         switch (cmd) {
4188         case SIOCOUTQ:
4189         {
4190                 int amount = sk_wmem_alloc_get(sk);
4191
4192                 return put_user(amount, (int __user *)arg);
4193         }
4194         case SIOCINQ:
4195         {
4196                 struct sk_buff *skb;
4197                 int amount = 0;
4198
4199                 spin_lock_bh(&sk->sk_receive_queue.lock);
4200                 skb = skb_peek(&sk->sk_receive_queue);
4201                 if (skb)
4202                         amount = skb->len;
4203                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4204                 return put_user(amount, (int __user *)arg);
4205         }
4206 #ifdef CONFIG_INET
4207         case SIOCADDRT:
4208         case SIOCDELRT:
4209         case SIOCDARP:
4210         case SIOCGARP:
4211         case SIOCSARP:
4212         case SIOCGIFADDR:
4213         case SIOCSIFADDR:
4214         case SIOCGIFBRDADDR:
4215         case SIOCSIFBRDADDR:
4216         case SIOCGIFNETMASK:
4217         case SIOCSIFNETMASK:
4218         case SIOCGIFDSTADDR:
4219         case SIOCSIFDSTADDR:
4220         case SIOCSIFFLAGS:
4221                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4222 #endif
4223
4224         default:
4225                 return -ENOIOCTLCMD;
4226         }
4227         return 0;
4228 }
4229
4230 static __poll_t packet_poll(struct file *file, struct socket *sock,
4231                                 poll_table *wait)
4232 {
4233         struct sock *sk = sock->sk;
4234         struct packet_sock *po = pkt_sk(sk);
4235         __poll_t mask = datagram_poll(file, sock, wait);
4236
4237         spin_lock_bh(&sk->sk_receive_queue.lock);
4238         if (po->rx_ring.pg_vec) {
4239                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4240                         TP_STATUS_KERNEL))
4241                         mask |= EPOLLIN | EPOLLRDNORM;
4242         }
4243         packet_rcv_try_clear_pressure(po);
4244         spin_unlock_bh(&sk->sk_receive_queue.lock);
4245         spin_lock_bh(&sk->sk_write_queue.lock);
4246         if (po->tx_ring.pg_vec) {
4247                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4248                         mask |= EPOLLOUT | EPOLLWRNORM;
4249         }
4250         spin_unlock_bh(&sk->sk_write_queue.lock);
4251         return mask;
4252 }
4253
4254
4255 /* Dirty? Well, I still have not found a better way to account
4256  * for user mmaps.
4257  */
4258
4259 static void packet_mm_open(struct vm_area_struct *vma)
4260 {
4261         struct file *file = vma->vm_file;
4262         struct socket *sock = file->private_data;
4263         struct sock *sk = sock->sk;
4264
4265         if (sk)
4266                 atomic_inc(&pkt_sk(sk)->mapped);
4267 }
4268
4269 static void packet_mm_close(struct vm_area_struct *vma)
4270 {
4271         struct file *file = vma->vm_file;
4272         struct socket *sock = file->private_data;
4273         struct sock *sk = sock->sk;
4274
4275         if (sk)
4276                 atomic_dec(&pkt_sk(sk)->mapped);
4277 }
4278
4279 static const struct vm_operations_struct packet_mmap_ops = {
4280         .open   =       packet_mm_open,
4281         .close  =       packet_mm_close,
4282 };
4283
4284 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4285                         unsigned int len)
4286 {
4287         int i;
4288
4289         for (i = 0; i < len; i++) {
4290                 if (likely(pg_vec[i].buffer)) {
4291                         if (is_vmalloc_addr(pg_vec[i].buffer))
4292                                 vfree(pg_vec[i].buffer);
4293                         else
4294                                 free_pages((unsigned long)pg_vec[i].buffer,
4295                                            order);
4296                         pg_vec[i].buffer = NULL;
4297                 }
4298         }
4299         kfree(pg_vec);
4300 }
4301
4302 static char *alloc_one_pg_vec_page(unsigned long order)
4303 {
4304         char *buffer;
4305         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4306                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4307
4308         buffer = (char *) __get_free_pages(gfp_flags, order);
4309         if (buffer)
4310                 return buffer;
4311
4312         /* __get_free_pages failed, fall back to vmalloc */
4313         buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4314         if (buffer)
4315                 return buffer;
4316
4317         /* vmalloc failed, let's dig into swap here */
4318         gfp_flags &= ~__GFP_NORETRY;
4319         buffer = (char *) __get_free_pages(gfp_flags, order);
4320         if (buffer)
4321                 return buffer;
4322
4323         /* complete and utter failure */
4324         return NULL;
4325 }
4326
4327 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4328 {
4329         unsigned int block_nr = req->tp_block_nr;
4330         struct pgv *pg_vec;
4331         int i;
4332
4333         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4334         if (unlikely(!pg_vec))
4335                 goto out;
4336
4337         for (i = 0; i < block_nr; i++) {
4338                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4339                 if (unlikely(!pg_vec[i].buffer))
4340                         goto out_free_pgvec;
4341         }
4342
4343 out:
4344         return pg_vec;
4345
4346 out_free_pgvec:
4347         free_pg_vec(pg_vec, order, block_nr);
4348         pg_vec = NULL;
4349         goto out;
4350 }
4351
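/* Geometry rules enforced below: tp_block_size must be positive and
 * page-aligned, tp_frame_size must be TPACKET_ALIGNMENT-aligned and large
 * enough for the header plus tp_reserve, and tp_frame_nr must equal
 * (tp_block_size / tp_frame_size) * tp_block_nr exactly.  Worked example:
 * 4 KiB blocks with 2 KiB frames give 2 frames per block, so 4 blocks
 * require tp_frame_nr == 8.
 */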
4352 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4353                 int closing, int tx_ring)
4354 {
4355         struct pgv *pg_vec = NULL;
4356         struct packet_sock *po = pkt_sk(sk);
4357         unsigned long *rx_owner_map = NULL;
4358         int was_running, order = 0;
4359         struct packet_ring_buffer *rb;
4360         struct sk_buff_head *rb_queue;
4361         __be16 num;
4362         int err;
4363         /* Added to keep code churn minimal */
4364         struct tpacket_req *req = &req_u->req;
4365
4366         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4367         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4368
4369         err = -EBUSY;
4370         if (!closing) {
4371                 if (atomic_read(&po->mapped))
4372                         goto out;
4373                 if (packet_read_pending(rb))
4374                         goto out;
4375         }
4376
4377         if (req->tp_block_nr) {
4378                 unsigned int min_frame_size;
4379
4380                 /* Sanity tests and some calculations */
4381                 err = -EBUSY;
4382                 if (unlikely(rb->pg_vec))
4383                         goto out;
4384
4385                 switch (po->tp_version) {
4386                 case TPACKET_V1:
4387                         po->tp_hdrlen = TPACKET_HDRLEN;
4388                         break;
4389                 case TPACKET_V2:
4390                         po->tp_hdrlen = TPACKET2_HDRLEN;
4391                         break;
4392                 case TPACKET_V3:
4393                         po->tp_hdrlen = TPACKET3_HDRLEN;
4394                         break;
4395                 }
4396
4397                 err = -EINVAL;
4398                 if (unlikely((int)req->tp_block_size <= 0))
4399                         goto out;
4400                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4401                         goto out;
4402                 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4403                 if (po->tp_version >= TPACKET_V3 &&
4404                     req->tp_block_size <
4405                     BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4406                         goto out;
4407                 if (unlikely(req->tp_frame_size < min_frame_size))
4408                         goto out;
4409                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4410                         goto out;
4411
4412                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4413                 if (unlikely(rb->frames_per_block == 0))
4414                         goto out;
4415                 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4416                         goto out;
4417                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4418                                         req->tp_frame_nr))
4419                         goto out;
4420
4421                 err = -ENOMEM;
4422                 order = get_order(req->tp_block_size);
4423                 pg_vec = alloc_pg_vec(req, order);
4424                 if (unlikely(!pg_vec))
4425                         goto out;
4426                 switch (po->tp_version) {
4427                 case TPACKET_V3:
4428                         /* Block transmit is not supported yet */
4429                         if (!tx_ring) {
4430                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4431                         } else {
4432                                 struct tpacket_req3 *req3 = &req_u->req3;
4433
4434                                 if (req3->tp_retire_blk_tov ||
4435                                     req3->tp_sizeof_priv ||
4436                                     req3->tp_feature_req_word) {
4437                                         err = -EINVAL;
4438                                         goto out_free_pg_vec;
4439                                 }
4440                         }
4441                         break;
4442                 default:
4443                         if (!tx_ring) {
4444                                 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4445                                         GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4446                                 if (!rx_owner_map)
4447                                         goto out_free_pg_vec;
4448                         }
4449                         break;
4450                 }
4451         }
4452         /* Done */
4453         else {
4454                 err = -EINVAL;
4455                 if (unlikely(req->tp_frame_nr))
4456                         goto out;
4457         }
4458
4459
4460         /* Detach socket from network */
4461         spin_lock(&po->bind_lock);
4462         was_running = po->running;
4463         num = po->num;
4464         if (was_running) {
4465                 WRITE_ONCE(po->num, 0);
4466                 __unregister_prot_hook(sk, false);
4467         }
4468         spin_unlock(&po->bind_lock);
4469
4470         synchronize_net();
4471
4472         err = -EBUSY;
4473         mutex_lock(&po->pg_vec_lock);
4474         if (closing || atomic_read(&po->mapped) == 0) {
4475                 err = 0;
4476                 spin_lock_bh(&rb_queue->lock);
4477                 swap(rb->pg_vec, pg_vec);
4478                 if (po->tp_version <= TPACKET_V2)
4479                         swap(rb->rx_owner_map, rx_owner_map);
4480                 rb->frame_max = (req->tp_frame_nr - 1);
4481                 rb->head = 0;
4482                 rb->frame_size = req->tp_frame_size;
4483                 spin_unlock_bh(&rb_queue->lock);
4484
4485                 swap(rb->pg_vec_order, order);
4486                 swap(rb->pg_vec_len, req->tp_block_nr);
4487
4488                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4489                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4490                                                 tpacket_rcv : packet_rcv;
4491                 skb_queue_purge(rb_queue);
4492                 if (atomic_read(&po->mapped))
4493                         pr_err("packet_mmap: vma is busy: %d\n",
4494                                atomic_read(&po->mapped));
4495         }
4496         mutex_unlock(&po->pg_vec_lock);
4497
4498         spin_lock(&po->bind_lock);
4499         if (was_running) {
4500                 WRITE_ONCE(po->num, num);
4501                 register_prot_hook(sk);
4502         }
4503         spin_unlock(&po->bind_lock);
4504         if (pg_vec && (po->tp_version > TPACKET_V2)) {
4505                 /* Because we don't support block-based V3 on tx-ring */
4506                 if (!tx_ring)
4507                         prb_shutdown_retire_blk_timer(po, rb_queue);
4508         }
4509
4510 out_free_pg_vec:
4511         if (pg_vec) {
4512                 bitmap_free(rx_owner_map);
4513                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4514         }
4515 out:
4516         return err;
4517 }
4518
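/* The vma must cover both configured rings in a single mapping: the RX ring
 * pages come first, immediately followed by the TX ring pages, and the
 * requested length has to match their combined size exactly (with a zero
 * page offset).  Userspace sketch for an RX-only setup:
 *
 *      size_t len = req.tp_block_size * req.tp_block_nr;
 *      void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 */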
4519 static int packet_mmap(struct file *file, struct socket *sock,
4520                 struct vm_area_struct *vma)
4521 {
4522         struct sock *sk = sock->sk;
4523         struct packet_sock *po = pkt_sk(sk);
4524         unsigned long size, expected_size;
4525         struct packet_ring_buffer *rb;
4526         unsigned long start;
4527         int err = -EINVAL;
4528         int i;
4529
4530         if (vma->vm_pgoff)
4531                 return -EINVAL;
4532
4533         mutex_lock(&po->pg_vec_lock);
4534
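	/*
	 * The mapping must cover every configured ring in full, so the
	 * expected length is the sum of the rx and tx ring sizes.
	 */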
	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

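	/*
	 * Insert the pages back to back: every rx ring page first, then
	 * every tx ring page, in block order.
	 */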
	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

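/*
 * Ops for the obsolete SOCK_PACKET interface: no ring setup (there is
 * no setsockopt/getsockopt here and mmap is sock_no_mmap), only plain
 * sendmsg/recvmsg.
 */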
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

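/*
 * Ops for SOCK_RAW and SOCK_DGRAM packet sockets, wired up by
 * packet_create().  A typical opener (SOCK_RAW keeps the link-level
 * header, SOCK_DGRAM strips it):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */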
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.gettstamp =	sock_gettstamp,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

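/*
 * Emits one line of /proc/net/packet per packet socket in the
 * namespace, e.g. (values illustrative only):
 *
 *	sk               RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff8880068f8000 3      3    0003   2     1 0      1000   17726
 */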
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	} else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(READ_ONCE(po->num)),
			   READ_ONCE(po->ifindex),
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

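/*
 * Registration order: protocol, socket family, pernet subsystem,
 * netdevice notifier.  packet_exit() and the error unwinding below
 * release them in the reverse order.
 */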
static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);