/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              PACKET - implements raw packet sockets.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() now used correctly
 *              Alan Cox        :       new skbuff lists, look ma no backlogs!
 *              Alan Cox        :       tidied skbuff lists.
 *              Alan Cox        :       Now uses generic datagram routines I
 *                                      added. Also fixed the peek/read crash
 *                                      from all old Linux datagram code.
 *              Alan Cox        :       Uses the improved datagram code.
 *              Alan Cox        :       Added NULL's for socket options.
 *              Alan Cox        :       Re-commented the code.
 *              Alan Cox        :       Use new kernel side addressing
 *              Rob Janssen     :       Correct MTU usage.
 *              Dave Platt      :       Counter leaks caused by incorrect
 *                                      interrupt locking and some slightly
 *                                      dubious gcc output. Can you read
 *                                      compiler: it said _VOLATILE_
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       New buffers. Use sk->mac.raw.
 *              Alan Cox        :       sendmsg/recvmsg support.
 *              Alan Cox        :       Protocol setting support
 *      Alexey Kuznetsov        :       Untied from IPv4 stack.
 *      Cyrus Durgin            :       Fixed kerneld for kmod.
 *      Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *              Eric Biederman  :       Allow for > 8 byte hardware addresses.
 *                                      The convention is that longer addresses
 *                                      will simply extend the hardware address
 *                                      byte arrays at the end of sockaddr_ll
 *                                      and packet_mreq.
 *              Johann Baudy    :       Added TX RING.
 *              Chetan Loke     :       Implemented TPACKET_V3 block abstraction
 *                                      layer.
 *                                      Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnel); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
                 ll header.  PPP does this, which is wrong, because it
                 introduces asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary
  If dev->hard_header==NULL we are unlikely to restore sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
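
/* Illustrative user-space view (a sketch, not part of this file): per the
 * conventions above, a SOCK_RAW packet socket delivers frames starting at
 * the link-layer header, while SOCK_DGRAM delivers them with the ll header
 * already pulled:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// On Ethernet, buf[0..13] is the MAC header here; with SOCK_DGRAM,
 *	// buf would begin at the network-layer payload instead.
 */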

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
        int             mr_ifindex;
        unsigned short  mr_type;
        unsigned short  mr_alen;
        unsigned char   mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
        struct tpacket_hdr  *h1;
        struct tpacket2_hdr *h2;
        struct tpacket3_hdr *h3;
        void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                int closing, int tx_ring);

#define V3_ALIGNMENT    (8)

#define BLK_HDR_LEN     (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
        (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
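
/* Worked example (illustrative): with V3_ALIGNMENT == 8, a user-requested
 * tp_sizeof_priv of 13 bytes is rounded up to 16, so BLK_PLUS_PRIV(13)
 * equals BLK_HDR_LEN + 16. The space left for frames in each block is
 * kblk_size - BLK_PLUS_PRIV(tp_sizeof_priv); init_prb_bdqc() below stores
 * this as max_frame_len.
 */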

#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)       ((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)           ((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)            ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)           ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
#define BLOCK_PRIV(x)           ((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
                        struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
                struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
                struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
                struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
        union {
                struct sockaddr_pkt pkt;
                union {
                        /* Trick: alias skb original length with
                         * ll.sll_family and ll.protocol in order
                         * to save room.
                         */
                        unsigned int origlen;
                        struct sockaddr_ll ll;
                };
        } sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)    ((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)    ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid) \
        ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)       \
        ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
        (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
        ((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
        return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
        struct net_device *dev;

        rcu_read_lock();
        dev = rcu_dereference(po->cached_dev);
        if (likely(dev))
                dev_hold(dev);
        rcu_read_unlock();

        return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
                                     struct net_device *dev)
{
        rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
        RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
        return po->xmit == packet_direct_xmit;
}

static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
                                  struct net_device *sb_dev)
{
        return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        u16 queue_index;

        if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb, NULL,
                                                    __packet_pick_tx_queue);
                queue_index = netdev_cap_txqueue(dev, queue_index);
        } else {
                queue_index = __packet_pick_tx_queue(dev, skb, NULL);
        }

        return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
        struct packet_sock *po = pkt_sk(sk);

        if (!po->running) {
                if (po->fanout)
                        __fanout_link(sk, po);
                else
                        dev_add_pack(&po->prot_hook);

                sock_hold(sk);
                po->running = 1;
        }
}

static void register_prot_hook(struct sock *sk)
{
        lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
        __register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        lockdep_assert_held_once(&po->bind_lock);

        po->running = 0;

        if (po->fanout)
                __fanout_unlink(sk, po);
        else
                __dev_remove_pack(&po->prot_hook);

        __sock_put(sk);

        if (sync) {
                spin_unlock(&po->bind_lock);
                synchronize_net();
                spin_lock(&po->bind_lock);
        }
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
        struct packet_sock *po = pkt_sk(sk);

        if (po->running)
                __unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
        union tpacket_uhdr h;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                break;
        case TPACKET_V2:
                h.h2->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        case TPACKET_V3:
                h.h3->tp_status = status;
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        smp_wmb();
}
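
/* Note (descriptive, added for clarity): tp_status is the producer/consumer
 * handoff word shared with user space via the mmap'ed ring. The smp_wmb()
 * above pairs with the smp_rmb() in __packet_get_status() below, so a
 * reader that observes the new status also observes the frame contents
 * written before it.
 */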

static int __packet_get_status(struct packet_sock *po, void *frame)
{
        union tpacket_uhdr h;

        smp_rmb();

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                flush_dcache_page(pgv_to_page(&h.h1->tp_status));
                return h.h1->tp_status;
        case TPACKET_V2:
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        case TPACKET_V3:
                flush_dcache_page(pgv_to_page(&h.h3->tp_status));
                return h.h3->tp_status;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return 0;
        }
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
                                   unsigned int flags)
{
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

        if (shhwtstamps &&
            (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
            ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
                return TP_STATUS_TS_RAW_HARDWARE;

        if (ktime_to_timespec_cond(skb->tstamp, ts))
                return TP_STATUS_TS_SOFTWARE;

        return 0;
}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
                                    struct sk_buff *skb)
{
        union tpacket_uhdr h;
        struct timespec ts;
        __u32 ts_status;

        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
                return 0;

        h.raw = frame;
        switch (po->tp_version) {
        case TPACKET_V1:
                h.h1->tp_sec = ts.tv_sec;
                h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
                break;
        case TPACKET_V2:
                h.h2->tp_sec = ts.tv_sec;
                h.h2->tp_nsec = ts.tv_nsec;
                break;
        case TPACKET_V3:
                h.h3->tp_sec = ts.tv_sec;
                h.h3->tp_nsec = ts.tv_nsec;
                break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
        }

        /* one flush is safe, as both fields always lie on the same cacheline */
        flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
        smp_wmb();

        return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                unsigned int position,
                int status)
{
        unsigned int pg_vec_pos, frame_offset;
        union tpacket_uhdr h;

        pg_vec_pos = position / rb->frames_per_block;
        frame_offset = position % rb->frames_per_block;

        h.raw = rb->pg_vec[pg_vec_pos].buffer +
                (frame_offset * rb->frame_size);

        if (status != __packet_get_status(po, h.raw))
                return NULL;

        return h.raw;
}
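
/* Worked example (illustrative): with frames_per_block == 4, position 6
 * maps to pg_vec_pos = 6 / 4 = 1 and frame_offset = 6 % 4 = 2, i.e. the
 * frame lives 2 * frame_size bytes into the second block's buffer.
 */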

static void *packet_current_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
                struct sk_buff_head *rb_queue)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
        spin_unlock_bh(&rb_queue->lock);

        prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
        struct tpacket_kbdq_core *pkc;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
                    0);
        pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
                                int blk_size_in_bytes)
{
        struct net_device *dev;
        unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
        struct ethtool_link_ksettings ecmd;
        int err;

        rtnl_lock();
        dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
        if (unlikely(!dev)) {
                rtnl_unlock();
                return DEFAULT_PRB_RETIRE_TOV;
        }
        err = __ethtool_get_link_ksettings(dev, &ecmd);
        rtnl_unlock();
        if (!err) {
                /*
                 * If the link speed is so slow you don't really
                 * need to worry about perf anyway
                 */
                if (ecmd.base.speed < SPEED_1000 ||
                    ecmd.base.speed == SPEED_UNKNOWN) {
                        return DEFAULT_PRB_RETIRE_TOV;
                } else {
                        msec = 1;
                        div = ecmd.base.speed / 1000;
                }
        }

        mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

        if (div)
                mbits /= div;

        tmo = mbits * msec;

        if (div)
                return tmo+1;
        return tmo;
}
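
/* Worked example (illustrative): for a 1 MiB block on a 1 Gbps link,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8 and div = 1, so the function
 * returns 8 + 1 = 9 ms, matching the "~8 ms to fill a block" estimate in
 * the timer-logic comment below. Slower or unknown-speed links fall back
 * to DEFAULT_PRB_RETIRE_TOV.
 */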

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
                        union tpacket_req_u *req_u)
{
        p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
                        struct packet_ring_buffer *rb,
                        struct pgv *pg_vec,
                        union tpacket_req_u *req_u)
{
        struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd;

        memset(p1, 0x0, sizeof(*p1));

        p1->knxt_seq_num = 1;
        p1->pkbdq = pg_vec;
        pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
        p1->pkblk_start = pg_vec[0].buffer;
        p1->kblk_size = req_u->req3.tp_block_size;
        p1->knum_blocks = req_u->req3.tp_block_nr;
        p1->hdrlen = po->tp_hdrlen;
        p1->version = po->tp_version;
        p1->last_kactive_blk_num = 0;
        po->stats.stats3.tp_freeze_q_cnt = 0;
        if (req_u->req3.tp_retire_blk_tov)
                p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
        else
                p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
                                                req_u->req3.tp_block_size);
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

        p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
        prb_setup_retire_blk_timer(po);
        prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
        mod_timer(&pkc->retire_blk_timer,
                        jiffies + pkc->tov_in_jiffies);
        pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
        struct packet_sock *po =
                from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
        struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        unsigned int frozen;
        struct tpacket_block_desc *pbd;

        spin_lock(&po->sk.sk_receive_queue.lock);

        frozen = prb_queue_frozen(pkc);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        if (unlikely(pkc->delete_blk_timer))
                goto out;

        /* We only need to plug the race when the block is partially filled.
         * tpacket_rcv:
         *              lock(); increment BLOCK_NUM_PKTS; unlock()
         *              copy_bits() is in progress ...
         *              timer fires on other cpu:
         *              we can't retire the current block because copy_bits
         *              is in progress.
         *
         */
        if (BLOCK_NUM_PKTS(pbd)) {
                while (atomic_read(&pkc->blk_fill_in_prog)) {
                        /* Waiting for skb_copy_bits to finish... */
                        cpu_relax();
                }
        }

        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
                if (!frozen) {
                        if (!BLOCK_NUM_PKTS(pbd)) {
                                /* An empty block. Just refresh the timer. */
                                goto refresh_timer;
                        }
                        prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
                        if (!prb_dispatch_next_block(pkc, po))
                                goto refresh_timer;
                        else
                                goto out;
                } else {
                        /* Case 1. Queue was frozen because user-space was
                         *         lagging behind.
                         */
                        if (prb_curr_blk_in_use(pbd)) {
                                /*
                                 * Ok, user-space is still behind.
                                 * So just refresh the timer.
                                 */
                                goto refresh_timer;
                        } else {
                               /* Case 2. Queue was frozen, user-space caught
                                * up, now the link went idle && the timer
                                * fired. We don't have a block to close, so we
                                * open this block and restart the timer.
                                * Opening a block thaws the queue and restarts
                                * the timer; thawing/timer-refresh is a side
                                * effect.
                                */
                                prb_open_block(pkc, pbd);
                                goto out;
                        }
                }
        }

refresh_timer:
        _prb_refresh_rx_retire_blk_timer(pkc);

out:
        spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1, __u32 status)
{
        /* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        u8 *start, *end;

        start = (u8 *)pbd1;

        /* Skip the block header (we know the header WILL fit in 4K) */
        start += PAGE_SIZE;

        end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
        for (; start < end; start += PAGE_SIZE)
                flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif

        /* Now update the block status. */

        BLOCK_STATUS(pbd1) = status;

        /* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        start = (u8 *)pbd1;
        flush_dcache_page(pgv_to_page(start));

        smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *       because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
                struct tpacket_block_desc *pbd1,
                struct packet_sock *po, unsigned int stat)
{
        __u32 status = TP_STATUS_USER | stat;

        struct tpacket3_hdr *last_pkt;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
        struct sock *sk = &po->sk;

        if (po->stats.stats3.tp_drops)
                status |= TP_STATUS_LOSING;

        last_pkt = (struct tpacket3_hdr *)pkc1->prev;
        last_pkt->tp_next_offset = 0;

        /* Get the ts of the last pkt */
        if (BLOCK_NUM_PKTS(pbd1)) {
                h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
                h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
        } else {
                /* Ok, we tmo'd - so get the current time.
                 *
                 * It shouldn't really happen as we don't close empty
                 * blocks. See prb_retire_rx_blk_timer_expired().
                 */
                struct timespec ts;
                getnstimeofday(&ts);
                h1->ts_last_pkt.ts_sec = ts.tv_sec;
                h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
        }

        smp_wmb();

        /* Flush the block */
        prb_flush_block(pkc1, pbd1, status);

        sk->sk_data_ready(sk);

        pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
        pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
        struct tpacket_block_desc *pbd1)
{
        struct timespec ts;
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

        smp_rmb();

        /* We could have just memset this but we will lose the
         * flexibility of making the priv area sticky
         */

        BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
        BLOCK_NUM_PKTS(pbd1) = 0;
        BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        getnstimeofday(&ts);

        h1->ts_first_pkt.ts_sec = ts.tv_sec;
        h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

        pkc1->pkblk_start = (char *)pbd1;
        pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

        BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
        BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

        pbd1->version = pkc1->version;
        pkc1->prev = pkc1->nxt_offset;
        pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

        prb_thaw_queue(pkc1);
        _prb_refresh_rx_retire_blk_timer(pkc1);

        smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
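
/* Illustrative user-space counterpart (a sketch, not kernel code): a
 * TPACKET_V3 reader hands a block back by writing TP_STATUS_KERNEL into
 * its descriptor, which is what lets a frozen queue thaw on the next
 * lookup or retire-timer tick:
 *
 *	struct tpacket_block_desc *pbd = ...;	// block in the mmap'ed ring
 *	while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(...);			// wait for the kernel to close it
 *	// ... walk the packets in the block ...
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release to kernel
 */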
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
                                  struct packet_sock *po)
{
        pkc->reset_pending_on_curr_blk = 1;
        po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po)
{
        struct tpacket_block_desc *pbd;

        smp_rmb();

        /* 1. Get current block num */
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* 2. If this block is currently in_use then freeze the queue */
        if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
                prb_freeze_queue(pkc, po);
                return NULL;
        }

        /*
         * 3.
         * open this block and return the offset where the first packet
         * needs to get stored.
         */
        prb_open_block(pkc, pbd);
        return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
                struct packet_sock *po, unsigned int status)
{
        struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* retire/close the current block */
        if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
                /*
                 * Plug the case where copy_bits() is in progress on
                 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
                 * have space to copy the pkt in the current block and
                 * called prb_retire_current_block()
                 *
                 * We don't need to worry about the TMO case because
                 * the timer-handler already handled this case.
                 */
                if (!(status & TP_STATUS_BLK_TMO)) {
                        while (atomic_read(&pkc->blk_fill_in_prog)) {
                                /* Waiting for skb_copy_bits to finish... */
                                cpu_relax();
                        }
                }
                prb_close_block(pkc, pbd, po, status);
                return;
        }
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
        return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
        return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        if (skb_vlan_tag_present(pkc->skb)) {
                ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
                ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
                ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
        } else {
                ppd->hv1.tp_vlan_tci = 0;
                ppd->hv1.tp_vlan_tpid = 0;
                ppd->tp_status = TP_STATUS_AVAILABLE;
        }
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
                        struct tpacket3_hdr *ppd)
{
        ppd->hv1.tp_padding = 0;
        prb_fill_vlan_info(pkc, ppd);

        if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
                prb_fill_rxhash(pkc, ppd);
        else
                prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
                                struct tpacket_kbdq_core *pkc,
                                struct tpacket_block_desc *pbd,
                                unsigned int len)
{
        struct tpacket3_hdr *ppd;

        ppd  = (struct tpacket3_hdr *)curr;
        ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
        pkc->prev = curr;
        pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_NUM_PKTS(pbd) += 1;
        atomic_inc(&pkc->blk_fill_in_prog);
        prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status,
                                            unsigned int len)
{
        struct tpacket_kbdq_core *pkc;
        struct tpacket_block_desc *pbd;
        char *curr, *end;

        pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
        pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

        /* Queue is frozen when user space is lagging behind */
        if (prb_queue_frozen(pkc)) {
                /*
                 * Check if the last block, which caused the queue to freeze,
                 * is still in use by user-space.
                 */
                if (prb_curr_blk_in_use(pbd)) {
                        /* Can't record this packet */
                        return NULL;
                } else {
                        /*
                         * Ok, the block was released by user-space.
                         * Now let's open that block.
                         * Opening a block also thaws the queue;
                         * thawing is a side effect.
                         */
                        prb_open_block(pkc, pbd);
                }
        }

        smp_mb();
        curr = pkc->nxt_offset;
        pkc->skb = skb;
        end = (char *)pbd + pkc->kblk_size;

        /* first try the current block */
        if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /* Ok, close the current block */
        prb_retire_current_block(pkc, po, 0);

        /* Now, try to dispatch the next block */
        curr = (char *)prb_dispatch_next_block(pkc, po);
        if (curr) {
                pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
                prb_fill_curr_block(curr, pkc, pbd, len);
                return (void *)curr;
        }

        /*
         * No free blocks are available. User-space hasn't caught up yet.
         * Queue was just frozen and now this packet will get dropped.
         */
        return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
                                            struct sk_buff *skb,
                                            int status, unsigned int len)
{
        char *curr = NULL;
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                curr = packet_lookup_frame(po, &po->rx_ring,
                                        po->rx_ring.head, status);
                return curr;
        case TPACKET_V3:
                return __packet_lookup_frame_in_block(po, skb, status, len);
        default:
                WARN(1, "TPACKET version not supported\n");
                BUG();
                return NULL;
        }
}

static void *prb_lookup_block(struct packet_sock *po,
                                     struct packet_ring_buffer *rb,
                                     unsigned int idx,
                                     int status)
{
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
        struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

        if (status != BLOCK_STATUS(pbd))
                return NULL;
        return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
        unsigned int prev;
        if (rb->prb_bdqc.kactive_blk_num)
                prev = rb->prb_bdqc.kactive_blk_num-1;
        else
                prev = rb->prb_bdqc.knum_blocks-1;
        return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
                                         struct packet_ring_buffer *rb,
                                         int status)
{
        unsigned int previous = prb_previous_blk_num(rb);
        return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
                                             struct packet_ring_buffer *rb,
                                             int status)
{
        if (po->tp_version <= TPACKET_V2)
                return packet_previous_frame(po, rb, status);

        return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
                                            struct packet_ring_buffer *rb)
{
        switch (po->tp_version) {
        case TPACKET_V1:
        case TPACKET_V2:
                return packet_increment_head(rb);
        case TPACKET_V3:
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
                return;
        }
}

static void *packet_previous_frame(struct packet_sock *po,
                struct packet_ring_buffer *rb,
                int status)
{
        unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
        return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
        buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
        this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
        this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
        unsigned int refcnt = 0;
        int cpu;

        /* We don't use pending refcount in rx_ring. */
        if (rb->pending_refcnt == NULL)
                return 0;

        for_each_possible_cpu(cpu)
                refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

        return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
        po->rx_ring.pending_refcnt = NULL;

        po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
        if (unlikely(po->tx_ring.pending_refcnt == NULL))
                return -ENOBUFS;

        return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
        free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF    2
#define ROOM_NONE       0x0
#define ROOM_LOW        0x1
#define ROOM_NORMAL     0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = po->rx_ring.frame_max + 1;
        idx = po->rx_ring.head;
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
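
/* Note (descriptive, added for clarity): the pow_off probe above checks the
 * ring not at the current head but len >> pow_off frames ahead of it. With
 * ROOM_POW_OFF == 2 this asks whether the frame a quarter of the ring ahead
 * is still owned by the kernel, so ROOM_NORMAL roughly means "at least 25%
 * of the ring is free"; a pow_off of 0 just checks the frame at the head.
 */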

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
        int idx, len;

        len = po->rx_ring.prb_bdqc.knum_blocks;
        idx = po->rx_ring.prb_bdqc.kactive_blk_num;
        if (pow_off)
                idx += len >> pow_off;
        if (idx >= len)
                idx -= len;
        return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        struct sock *sk = &po->sk;
        int ret = ROOM_NONE;

        if (po->prot_hook.func != tpacket_rcv) {
                int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
                                          - (skb ? skb->truesize : 0);
                if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
                        return ROOM_NORMAL;
                else if (avail > 0)
                        return ROOM_LOW;
                else
                        return ROOM_NONE;
        }

        if (po->tp_version == TPACKET_V3) {
                if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_v3_has_room(po, 0))
                        ret = ROOM_LOW;
        } else {
                if (__tpacket_has_room(po, ROOM_POW_OFF))
                        ret = ROOM_NORMAL;
                else if (__tpacket_has_room(po, 0))
                        ret = ROOM_LOW;
        }

        return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
        int ret;
        bool has_room;

        spin_lock_bh(&po->sk.sk_receive_queue.lock);
        ret = __packet_rcv_has_room(po, skb);
        has_room = ret == ROOM_NORMAL;
        if (po->pressure == has_room)
                po->pressure = !has_room;
        spin_unlock_bh(&po->sk.sk_receive_queue.lock);

        return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_error_queue);

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive packet socket: %p\n", sk);
                return;
        }

        sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
        u32 rxhash;
        int i, count = 0;

        rxhash = skb_get_hash(skb);
        for (i = 0; i < ROLLOVER_HLEN; i++)
                if (po->rollover->history[i] == rxhash)
                        count++;

        po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
        return count > (ROLLOVER_HLEN >> 1);
}
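
/* Note (descriptive, added for clarity): the rollover history is a small,
 * randomly-replaced cache of recent flow hashes. A flow counts as "huge"
 * when its hash fills more than half of the ROLLOVER_HLEN slots, i.e. it
 * dominates recent traffic. fanout_demux_rollover() below keeps small
 * flows on a ROOM_LOW socket for flow affinity but moves huge ones away.
 */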

static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
{
        return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        unsigned int val = atomic_inc_return(&f->rr_cur);

        return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
                                          struct sk_buff *skb,
                                          unsigned int idx, bool try_self,
                                          unsigned int num)
{
        struct packet_sock *po, *po_next, *po_skip = NULL;
        unsigned int i, j, room = ROOM_NONE;

        po = pkt_sk(f->arr[idx]);

        if (try_self) {
                room = packet_rcv_has_room(po, skb);
                if (room == ROOM_NORMAL ||
                    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
                        return idx;
                po_skip = po;
        }

        i = j = min_t(int, po->rollover->sock, num - 1);
        do {
                po_next = pkt_sk(f->arr[i]);
                if (po_next != po_skip && !po_next->pressure &&
                    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
                        if (i != j)
                                po->rollover->sock = i;
                        atomic_long_inc(&po->rollover->num);
                        if (room == ROOM_LOW)
                                atomic_long_inc(&po->rollover->num_huge);
                        return i;
                }

                if (++i == num)
                        i = 0;
        } while (i != j);

        atomic_long_inc(&po->rollover->num_failed);
        return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
                                    struct sk_buff *skb,
                                    unsigned int num)
{
        return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
                                     struct sk_buff *skb,
                                     unsigned int num)
{
        struct bpf_prog *prog;
        unsigned int ret = 0;

        rcu_read_lock();
        prog = rcu_dereference(f->bpf_prog);
        if (prog)
                ret = bpf_prog_run_clear_cb(prog, skb) % num;
        rcu_read_unlock();

        return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
        return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                             struct packet_type *pt, struct net_device *orig_dev)
{
        struct packet_fanout *f = pt->af_packet_priv;
        unsigned int num = READ_ONCE(f->num_members);
        struct net *net = read_pnet(&f->net);
        struct packet_sock *po;
        unsigned int idx;

        if (!net_eq(dev_net(dev), net) || !num) {
                kfree_skb(skb);
                return 0;
        }

        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
                skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
                if (!skb)
                        return 0;
        }
        switch (f->type) {
        case PACKET_FANOUT_HASH:
        default:
                idx = fanout_demux_hash(f, skb, num);
                break;
        case PACKET_FANOUT_LB:
                idx = fanout_demux_lb(f, skb, num);
                break;
        case PACKET_FANOUT_CPU:
                idx = fanout_demux_cpu(f, skb, num);
                break;
        case PACKET_FANOUT_RND:
                idx = fanout_demux_rnd(f, skb, num);
                break;
        case PACKET_FANOUT_QM:
                idx = fanout_demux_qm(f, skb, num);
                break;
        case PACKET_FANOUT_ROLLOVER:
                idx = fanout_demux_rollover(f, skb, 0, false, num);
                break;
        case PACKET_FANOUT_CBPF:
        case PACKET_FANOUT_EBPF:
                idx = fanout_demux_bpf(f, skb, num);
                break;
        }

        if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
                idx = fanout_demux_rollover(f, skb, idx, true, num);

        po = pkt_sk(f->arr[idx]);
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
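
/* Illustrative user-space setup (a sketch): fanout groups are joined with
 * setsockopt(PACKET_FANOUT); the low 16 bits carry the group id and the
 * high 16 bits the type and flags demultiplexed above:
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */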
1459
1460 DEFINE_MUTEX(fanout_mutex);
1461 EXPORT_SYMBOL_GPL(fanout_mutex);
1462 static LIST_HEAD(fanout_list);
1463 static u16 fanout_next_id;
1464
1465 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1466 {
1467         struct packet_fanout *f = po->fanout;
1468
1469         spin_lock(&f->lock);
1470         f->arr[f->num_members] = sk;
1471         smp_wmb();
1472         f->num_members++;
1473         if (f->num_members == 1)
1474                 dev_add_pack(&f->prot_hook);
1475         spin_unlock(&f->lock);
1476 }
1477
1478 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1479 {
1480         struct packet_fanout *f = po->fanout;
1481         int i;
1482
1483         spin_lock(&f->lock);
1484         for (i = 0; i < f->num_members; i++) {
1485                 if (f->arr[i] == sk)
1486                         break;
1487         }
1488         BUG_ON(i >= f->num_members);
1489         f->arr[i] = f->arr[f->num_members - 1];
1490         f->num_members--;
1491         if (f->num_members == 0)
1492                 __dev_remove_pack(&f->prot_hook);
1493         spin_unlock(&f->lock);
1494 }
1495
1496 static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1497 {
1498         if (sk->sk_family != PF_PACKET)
1499                 return false;
1500
1501         return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1502 }
1503
1504 static void fanout_init_data(struct packet_fanout *f)
1505 {
1506         switch (f->type) {
1507         case PACKET_FANOUT_LB:
1508                 atomic_set(&f->rr_cur, 0);
1509                 break;
1510         case PACKET_FANOUT_CBPF:
1511         case PACKET_FANOUT_EBPF:
1512                 RCU_INIT_POINTER(f->bpf_prog, NULL);
1513                 break;
1514         }
1515 }
1516
1517 static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1518 {
1519         struct bpf_prog *old;
1520
1521         spin_lock(&f->lock);
1522         old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1523         rcu_assign_pointer(f->bpf_prog, new);
1524         spin_unlock(&f->lock);
1525
1526         if (old) {
1527                 synchronize_net();
1528                 bpf_prog_destroy(old);
1529         }
1530 }
1531
1532 static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1533                                 unsigned int len)
1534 {
1535         struct bpf_prog *new;
1536         struct sock_fprog fprog;
1537         int ret;
1538
1539         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1540                 return -EPERM;
1541         if (len != sizeof(fprog))
1542                 return -EINVAL;
1543         if (copy_from_user(&fprog, data, len))
1544                 return -EFAULT;
1545
1546         ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1547         if (ret)
1548                 return ret;
1549
1550         __fanout_set_data_bpf(po->fanout, new);
1551         return 0;
1552 }
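/* Usage sketch (userspace, illustrative): for a PACKET_FANOUT_CBPF group,
 * PACKET_FANOUT_DATA takes a classic BPF sock_fprog whose return value,
 * modulo the member count, selects the receiving socket.  The
 * two-instruction program below simply returns the packet length, purely
 * as a placeholder steering policy.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_W | BPF_LEN, 0, 0, 0 },	// A = skb length
 *		{ BPF_RET | BPF_A, 0, 0, 0 },		// return A
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &fprog, sizeof(fprog));
 */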
1553
1554 static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1555                                 unsigned int len)
1556 {
1557         struct bpf_prog *new;
1558         u32 fd;
1559
1560         if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1561                 return -EPERM;
1562         if (len != sizeof(fd))
1563                 return -EINVAL;
1564         if (copy_from_user(&fd, data, len))
1565                 return -EFAULT;
1566
1567         new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1568         if (IS_ERR(new))
1569                 return PTR_ERR(new);
1570
1571         __fanout_set_data_bpf(po->fanout, new);
1572         return 0;
1573 }
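/* Usage sketch (userspace, illustrative): the PACKET_FANOUT_EBPF variant
 * takes the fd of an already loaded BPF_PROG_TYPE_SOCKET_FILTER program
 * instead of a sock_fprog; its return value picks the member index
 * exactly like the cBPF case.  prog_fd below is assumed to come from
 * bpf(BPF_PROG_LOAD, ...) or an equivalent loader.
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 */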
1574
1575 static int fanout_set_data(struct packet_sock *po, char __user *data,
1576                            unsigned int len)
1577 {
1578         switch (po->fanout->type) {
1579         case PACKET_FANOUT_CBPF:
1580                 return fanout_set_data_cbpf(po, data, len);
1581         case PACKET_FANOUT_EBPF:
1582                 return fanout_set_data_ebpf(po, data, len);
1583         default:
1584                 return -EINVAL;
1585         }
1586 }
1587
1588 static void fanout_release_data(struct packet_fanout *f)
1589 {
1590         switch (f->type) {
1591         case PACKET_FANOUT_CBPF:
1592         case PACKET_FANOUT_EBPF:
1593                 __fanout_set_data_bpf(f, NULL);
1594         }
1595 }
1596
1597 static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1598 {
1599         struct packet_fanout *f;
1600
1601         list_for_each_entry(f, &fanout_list, list) {
1602                 if (f->id == candidate_id &&
1603                     read_pnet(&f->net) == sock_net(sk)) {
1604                         return false;
1605                 }
1606         }
1607         return true;
1608 }
1609
1610 static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1611 {
1612         u16 id = fanout_next_id;
1613
1614         do {
1615                 if (__fanout_id_is_free(sk, id)) {
1616                         *new_id = id;
1617                         fanout_next_id = id + 1;
1618                         return true;
1619                 }
1620
1621                 id++;
1622         } while (id != fanout_next_id);
1623
1624         return false;
1625 }
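/* Usage sketch (userspace, illustrative): with
 * PACKET_FANOUT_FLAG_UNIQUEID the caller must pass id 0; the kernel picks
 * a free id via fanout_find_new_id(), and the result can be read back and
 * shared with the other group members.
 *
 *	int arg = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID) << 16;
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *	socklen_t len = sizeof(arg);
 *	getsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, &len);
 *	uint16_t group_id = arg & 0xffff;
 */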
1626
1627 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1628 {
1629         struct packet_rollover *rollover = NULL;
1630         struct packet_sock *po = pkt_sk(sk);
1631         struct packet_fanout *f, *match;
1632         u8 type = type_flags & 0xff;
1633         u8 flags = type_flags >> 8;
1634         int err;
1635
1636         switch (type) {
1637         case PACKET_FANOUT_ROLLOVER:
1638                 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1639                         return -EINVAL;
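                /* fall through */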
1640         case PACKET_FANOUT_HASH:
1641         case PACKET_FANOUT_LB:
1642         case PACKET_FANOUT_CPU:
1643         case PACKET_FANOUT_RND:
1644         case PACKET_FANOUT_QM:
1645         case PACKET_FANOUT_CBPF:
1646         case PACKET_FANOUT_EBPF:
1647                 break;
1648         default:
1649                 return -EINVAL;
1650         }
1651
1652         mutex_lock(&fanout_mutex);
1653
1654         err = -EALREADY;
1655         if (po->fanout)
1656                 goto out;
1657
1658         if (type == PACKET_FANOUT_ROLLOVER ||
1659             (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1660                 err = -ENOMEM;
1661                 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1662                 if (!rollover)
1663                         goto out;
1664                 atomic_long_set(&rollover->num, 0);
1665                 atomic_long_set(&rollover->num_huge, 0);
1666                 atomic_long_set(&rollover->num_failed, 0);
1667         }
1668
1669         if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1670                 if (id != 0) {
1671                         err = -EINVAL;
1672                         goto out;
1673                 }
1674                 if (!fanout_find_new_id(sk, &id)) {
1675                         err = -ENOMEM;
1676                         goto out;
1677                 }
1678                 /* ephemeral flag for the first socket in the group: drop it */
1679                 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1680         }
1681
1682         match = NULL;
1683         list_for_each_entry(f, &fanout_list, list) {
1684                 if (f->id == id &&
1685                     read_pnet(&f->net) == sock_net(sk)) {
1686                         match = f;
1687                         break;
1688                 }
1689         }
1690         err = -EINVAL;
1691         if (match && match->flags != flags)
1692                 goto out;
1693         if (!match) {
1694                 err = -ENOMEM;
1695                 match = kzalloc(sizeof(*match), GFP_KERNEL);
1696                 if (!match)
1697                         goto out;
1698                 write_pnet(&match->net, sock_net(sk));
1699                 match->id = id;
1700                 match->type = type;
1701                 match->flags = flags;
1702                 INIT_LIST_HEAD(&match->list);
1703                 spin_lock_init(&match->lock);
1704                 refcount_set(&match->sk_ref, 0);
1705                 fanout_init_data(match);
1706                 match->prot_hook.type = po->prot_hook.type;
1707                 match->prot_hook.dev = po->prot_hook.dev;
1708                 match->prot_hook.func = packet_rcv_fanout;
1709                 match->prot_hook.af_packet_priv = match;
1710                 match->prot_hook.id_match = match_fanout_group;
1711                 list_add(&match->list, &fanout_list);
1712         }
1713         err = -EINVAL;
1714
1715         spin_lock(&po->bind_lock);
1716         if (po->running &&
1717             match->type == type &&
1718             match->prot_hook.type == po->prot_hook.type &&
1719             match->prot_hook.dev == po->prot_hook.dev) {
1720                 err = -ENOSPC;
1721                 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1722                         __dev_remove_pack(&po->prot_hook);
1723                         po->fanout = match;
1724                         po->rollover = rollover;
1725                         rollover = NULL;
1726                         refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1727                         __fanout_link(sk, po);
1728                         err = 0;
1729                 }
1730         }
1731         spin_unlock(&po->bind_lock);
1732
1733         if (err && !refcount_read(&match->sk_ref)) {
1734                 list_del(&match->list);
1735                 kfree(match);
1736         }
1737
1738 out:
1739         kfree(rollover);
1740         mutex_unlock(&fanout_mutex);
1741         return err;
1742 }
1743
1744 /* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1745  * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1746  * It is the responsibility of the caller to call fanout_release_data() and
1747  * free the returned packet_fanout (after synchronize_net()).
1748  */
1749 static struct packet_fanout *fanout_release(struct sock *sk)
1750 {
1751         struct packet_sock *po = pkt_sk(sk);
1752         struct packet_fanout *f;
1753
1754         mutex_lock(&fanout_mutex);
1755         f = po->fanout;
1756         if (f) {
1757                 po->fanout = NULL;
1758
1759                 if (refcount_dec_and_test(&f->sk_ref))
1760                         list_del(&f->list);
1761                 else
1762                         f = NULL;
1763         }
1764         mutex_unlock(&fanout_mutex);
1765
1766         return f;
1767 }
1768
1769 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1770                                           struct sk_buff *skb)
1771 {
1772         /* Earlier code assumed this would be a VLAN pkt, double-check
1773          * this now that we have the actual packet in hand. We can only
1774          * do this check on Ethernet devices.
1775          */
1776         if (unlikely(dev->type != ARPHRD_ETHER))
1777                 return false;
1778
1779         skb_reset_mac_header(skb);
1780         return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1781 }
1782
1783 static const struct proto_ops packet_ops;
1784
1785 static const struct proto_ops packet_ops_spkt;
1786
1787 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1788                            struct packet_type *pt, struct net_device *orig_dev)
1789 {
1790         struct sock *sk;
1791         struct sockaddr_pkt *spkt;
1792
1793         /*
1794          *      When we registered the protocol we saved the socket in the data
1795          *      field for just this event.
1796          */
1797
1798         sk = pt->af_packet_priv;
1799
1800         /*
1801          *      Yank back the headers [hope the device set this
1802          *      right or kerboom...]
1803          *
1804          *      Incoming packets have ll header pulled,
1805          *      push it back.
1806          *
1807          *      For outgoing ones skb->data == skb_mac_header(skb)
1808          *      so that this procedure is a no-op.
1809          */
1810
1811         if (skb->pkt_type == PACKET_LOOPBACK)
1812                 goto out;
1813
1814         if (!net_eq(dev_net(dev), sock_net(sk)))
1815                 goto out;
1816
1817         skb = skb_share_check(skb, GFP_ATOMIC);
1818         if (skb == NULL)
1819                 goto oom;
1820
1821         /* drop any routing info */
1822         skb_dst_drop(skb);
1823
1824         /* drop conntrack reference */
1825         nf_reset(skb);
1826
1827         spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1828
1829         skb_push(skb, skb->data - skb_mac_header(skb));
1830
1831         /*
1832          *      The SOCK_PACKET socket receives _all_ frames.
1833          */
1834
1835         spkt->spkt_family = dev->type;
1836         strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1837         spkt->spkt_protocol = skb->protocol;
1838
1839         /*
1840          *      Charge the memory to the socket. This is done specifically
1841          *      to prevent sockets using all the memory up.
1842          *      to prevent sockets from using up all the memory.
1843
1844         if (sock_queue_rcv_skb(sk, skb) == 0)
1845                 return 0;
1846
1847 out:
1848         kfree_skb(skb);
1849 oom:
1850         return 0;
1851 }
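/* Usage sketch (userspace, illustrative): the legacy SOCK_PACKET
 * interface serviced by packet_rcv_spkt() hands every frame to the reader
 * along with a sockaddr_pkt naming the ingress device.  New code should
 * prefer AF_PACKET with SOCK_RAW or SOCK_DGRAM.
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt;
 *	socklen_t alen = sizeof(spkt);
 *	char buf[2048];
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&spkt, &alen);
 *	// spkt.spkt_device now names the device, e.g. "eth0"
 */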
1852
1853 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1854 {
1855         if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1856             sock->type == SOCK_RAW) {
1857                 skb_reset_mac_header(skb);
1858                 skb->protocol = dev_parse_header_protocol(skb);
1859         }
1860
1861         skb_probe_transport_header(skb);
1862 }
1863
1864 /*
1865  *      Output a raw packet to the device layer. This bypasses all the other
1866  *      protocol layers, so you must supply a complete link-layer frame.
1867  */
1868
1869 static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1870                                size_t len)
1871 {
1872         struct sock *sk = sock->sk;
1873         DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1874         struct sk_buff *skb = NULL;
1875         struct net_device *dev;
1876         struct sockcm_cookie sockc;
1877         __be16 proto = 0;
1878         int err;
1879         int extra_len = 0;
1880
1881         /*
1882          *      Get and verify the address.
1883          */
1884
1885         if (saddr) {
1886                 if (msg->msg_namelen < sizeof(struct sockaddr))
1887                         return -EINVAL;
1888                 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1889                         proto = saddr->spkt_protocol;
1890         } else
1891                 return -ENOTCONN;       /* SOCK_PACKET must be sent giving an address */
1892
1893         /*
1894          *      Find the device first to size check it
1895          */
1896
1897         saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1898 retry:
1899         rcu_read_lock();
1900         dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1901         err = -ENODEV;
1902         if (dev == NULL)
1903                 goto out_unlock;
1904
1905         err = -ENETDOWN;
1906         if (!(dev->flags & IFF_UP))
1907                 goto out_unlock;
1908
1909         /*
1910          * You may not queue a frame bigger than the mtu. This is the lowest level
1911          * raw protocol and you must do your own fragmentation at this level.
1912          */
1913
1914         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1915                 if (!netif_supports_nofcs(dev)) {
1916                         err = -EPROTONOSUPPORT;
1917                         goto out_unlock;
1918                 }
1919                 extra_len = 4; /* We're doing our own CRC */
1920         }
1921
1922         err = -EMSGSIZE;
1923         if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1924                 goto out_unlock;
1925
1926         if (!skb) {
1927                 size_t reserved = LL_RESERVED_SPACE(dev);
1928                 int tlen = dev->needed_tailroom;
1929                 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1930
1931                 rcu_read_unlock();
1932                 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1933                 if (skb == NULL)
1934                         return -ENOBUFS;
1935                 /* FIXME: Save some space for broken drivers that write a hard
1936                  * header at transmission time by themselves. PPP is the notable
1937                  * one here. This should really be fixed at the driver level.
1938                  */
1939                 skb_reserve(skb, reserved);
1940                 skb_reset_network_header(skb);
1941
1942                 /* Try to align data part correctly */
1943                 if (hhlen) {
1944                         skb->data -= hhlen;
1945                         skb->tail -= hhlen;
1946                         if (len < hhlen)
1947                                 skb_reset_network_header(skb);
1948                 }
1949                 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1950                 if (err)
1951                         goto out_free;
1952                 goto retry;
1953         }
1954
1955         if (!dev_validate_header(dev, skb->data, len)) {
1956                 err = -EINVAL;
1957                 goto out_unlock;
1958         }
1959         if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1960             !packet_extra_vlan_len_allowed(dev, skb)) {
1961                 err = -EMSGSIZE;
1962                 goto out_unlock;
1963         }
1964
1965         sockcm_init(&sockc, sk);
1966         if (msg->msg_controllen) {
1967                 err = sock_cmsg_send(sk, msg, &sockc);
1968                 if (unlikely(err))
1969                         goto out_unlock;
1970         }
1971
1972         skb->protocol = proto;
1973         skb->dev = dev;
1974         skb->priority = sk->sk_priority;
1975         skb->mark = sk->sk_mark;
1976         skb->tstamp = sockc.transmit_time;
1977
1978         skb_setup_tx_timestamp(skb, sockc.tsflags);
1979
1980         if (unlikely(extra_len == 4))
1981                 skb->no_fcs = 1;
1982
1983         packet_parse_headers(skb, sock);
1984
1985         dev_queue_xmit(skb);
1986         rcu_read_unlock();
1987         return len;
1988
1989 out_unlock:
1990         rcu_read_unlock();
1991 out_free:
1992         kfree_skb(skb);
1993         return err;
1994 }
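/* Usage sketch (userspace, illustrative): transmitting through the
 * SOCK_PACKET path above.  The destination device is named in the
 * address, and the buffer must already be a complete link-layer frame,
 * since no header is built on the caller's behalf.  frame/frame_len are
 * placeholders.
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device) - 1);
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */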
1995
1996 static unsigned int run_filter(struct sk_buff *skb,
1997                                const struct sock *sk,
1998                                unsigned int res)
1999 {
2000         struct sk_filter *filter;
2001
2002         rcu_read_lock();
2003         filter = rcu_dereference(sk->sk_filter);
2004         if (filter != NULL)
2005                 res = bpf_prog_run_clear_cb(filter->prog, skb);
2006         rcu_read_unlock();
2007
2008         return res;
2009 }
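/* Usage sketch (userspace, illustrative): the filter consulted by
 * run_filter() is the ordinary socket filter attached with
 * SO_ATTACH_FILTER.  Returning 0 drops the packet; a smaller return value
 * truncates the captured snapshot (snaplen).
 *
 *	struct sock_filter code[] = {
 *		// accept every packet at full length
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		   &fprog, sizeof(fprog));
 */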
2010
2011 static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2012                            size_t *len)
2013 {
2014         struct virtio_net_hdr vnet_hdr;
2015
2016         if (*len < sizeof(vnet_hdr))
2017                 return -EINVAL;
2018         *len -= sizeof(vnet_hdr);
2019
2020         if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2021                 return -EINVAL;
2022
2023         return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2024 }
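/* Usage sketch (userspace, illustrative): once PACKET_VNET_HDR is
 * enabled, packet_rcv_vnet() prepends a little-endian struct
 * virtio_net_hdr to every received message, so a split iovec keeps the
 * header and payload apart.  buf/buflen are placeholders.
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *	struct virtio_net_hdr vh;
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh, .iov_len = sizeof(vh) },
 *		{ .iov_base = buf, .iov_len = buflen },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *	recvmsg(fd, &mh, 0);
 */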
2025
2026 /*
2027  * This function performs lazy skb cloning, in the hope that most packets
2028  * are discarded by BPF.
2029  *
2030  * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2031  * and skb->cb are mangled. It works because (and until) packets
2032  * falling here are owned by the current CPU. Output packets are cloned
2033  * by dev_queue_xmit_nit(), input packets are processed by net_bh
2034  * sequentially, so that if we return the skb to its original state on
2035  * exit, we will not harm anyone.
2036  */
2037
2038 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2039                       struct packet_type *pt, struct net_device *orig_dev)
2040 {
2041         struct sock *sk;
2042         struct sockaddr_ll *sll;
2043         struct packet_sock *po;
2044         u8 *skb_head = skb->data;
2045         int skb_len = skb->len;
2046         unsigned int snaplen, res;
2047         bool is_drop_n_account = false;
2048
2049         if (skb->pkt_type == PACKET_LOOPBACK)
2050                 goto drop;
2051
2052         sk = pt->af_packet_priv;
2053         po = pkt_sk(sk);
2054
2055         if (!net_eq(dev_net(dev), sock_net(sk)))
2056                 goto drop;
2057
2058         skb->dev = dev;
2059
2060         if (dev->header_ops) {
2061                 /* The device has an explicit notion of ll header,
2062                  * exported to higher levels.
2063                  *
2064                  * Otherwise, the device hides details of its frame
2065                  * structure, so that the corresponding packet head is
2066                  * never delivered to the user.
2067                  */
2068                 if (sk->sk_type != SOCK_DGRAM)
2069                         skb_push(skb, skb->data - skb_mac_header(skb));
2070                 else if (skb->pkt_type == PACKET_OUTGOING) {
2071                         /* Special case: outgoing packets have ll header at head */
2072                         skb_pull(skb, skb_network_offset(skb));
2073                 }
2074         }
2075
2076         snaplen = skb->len;
2077
2078         res = run_filter(skb, sk, snaplen);
2079         if (!res)
2080                 goto drop_n_restore;
2081         if (snaplen > res)
2082                 snaplen = res;
2083
2084         if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2085                 goto drop_n_acct;
2086
2087         if (skb_shared(skb)) {
2088                 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2089                 if (nskb == NULL)
2090                         goto drop_n_acct;
2091
2092                 if (skb_head != skb->data) {
2093                         skb->data = skb_head;
2094                         skb->len = skb_len;
2095                 }
2096                 consume_skb(skb);
2097                 skb = nskb;
2098         }
2099
2100         sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2101
2102         sll = &PACKET_SKB_CB(skb)->sa.ll;
2103         sll->sll_hatype = dev->type;
2104         sll->sll_pkttype = skb->pkt_type;
2105         if (unlikely(po->origdev))
2106                 sll->sll_ifindex = orig_dev->ifindex;
2107         else
2108                 sll->sll_ifindex = dev->ifindex;
2109
2110         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2111
2112         /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2113          * Use their space for storing the original skb length.
2114          */
2115         PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2116
2117         if (pskb_trim(skb, snaplen))
2118                 goto drop_n_acct;
2119
2120         skb_set_owner_r(skb, sk);
2121         skb->dev = NULL;
2122         skb_dst_drop(skb);
2123
2124         /* drop conntrack reference */
2125         nf_reset(skb);
2126
2127         spin_lock(&sk->sk_receive_queue.lock);
2128         po->stats.stats1.tp_packets++;
2129         sock_skb_set_dropcount(sk, skb);
2130         __skb_queue_tail(&sk->sk_receive_queue, skb);
2131         spin_unlock(&sk->sk_receive_queue.lock);
2132         sk->sk_data_ready(sk);
2133         return 0;
2134
2135 drop_n_acct:
2136         is_drop_n_account = true;
2137         spin_lock(&sk->sk_receive_queue.lock);
2138         po->stats.stats1.tp_drops++;
2139         atomic_inc(&sk->sk_drops);
2140         spin_unlock(&sk->sk_receive_queue.lock);
2141
2142 drop_n_restore:
2143         if (skb_head != skb->data && skb_shared(skb)) {
2144                 skb->data = skb_head;
2145                 skb->len = skb_len;
2146         }
2147 drop:
2148         if (!is_drop_n_account)
2149                 consume_skb(skb);
2150         else
2151                 kfree_skb(skb);
2152         return 0;
2153 }
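/* Usage sketch (userspace, illustrative): reading from a plain (non-mmap)
 * packet socket serviced by packet_rcv().  The returned sockaddr_ll
 * carries the metadata filled in above: ifindex, hardware type, packet
 * type and the parsed hardware address.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t alen = sizeof(sll);
 *	ssize_t n = recvfrom(fd, buf, buflen, 0,
 *			     (struct sockaddr *)&sll, &alen);
 *	// sll.sll_pkttype distinguishes e.g. PACKET_HOST,
 *	// PACKET_BROADCAST and PACKET_OUTGOING
 */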
2154
2155 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2156                        struct packet_type *pt, struct net_device *orig_dev)
2157 {
2158         struct sock *sk;
2159         struct packet_sock *po;
2160         struct sockaddr_ll *sll;
2161         union tpacket_uhdr h;
2162         u8 *skb_head = skb->data;
2163         int skb_len = skb->len;
2164         unsigned int snaplen, res;
2165         unsigned long status = TP_STATUS_USER;
2166         unsigned short macoff, netoff, hdrlen;
2167         struct sk_buff *copy_skb = NULL;
2168         struct timespec ts;
2169         __u32 ts_status;
2170         bool is_drop_n_account = false;
2171         bool do_vnet = false;
2172
2173         /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2174          * We may add members to them up to the current aligned size without forcing
2175          * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2176          */
2177         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2178         BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2179
2180         if (skb->pkt_type == PACKET_LOOPBACK)
2181                 goto drop;
2182
2183         sk = pt->af_packet_priv;
2184         po = pkt_sk(sk);
2185
2186         if (!net_eq(dev_net(dev), sock_net(sk)))
2187                 goto drop;
2188
2189         if (dev->header_ops) {
2190                 if (sk->sk_type != SOCK_DGRAM)
2191                         skb_push(skb, skb->data - skb_mac_header(skb));
2192                 else if (skb->pkt_type == PACKET_OUTGOING) {
2193                         /* Special case: outgoing packets have ll header at head */
2194                         skb_pull(skb, skb_network_offset(skb));
2195                 }
2196         }
2197
2198         snaplen = skb->len;
2199
2200         res = run_filter(skb, sk, snaplen);
2201         if (!res)
2202                 goto drop_n_restore;
2203
2204         if (skb->ip_summed == CHECKSUM_PARTIAL)
2205                 status |= TP_STATUS_CSUMNOTREADY;
2206         else if (skb->pkt_type != PACKET_OUTGOING &&
2207                  (skb->ip_summed == CHECKSUM_COMPLETE ||
2208                   skb_csum_unnecessary(skb)))
2209                 status |= TP_STATUS_CSUM_VALID;
2210
2211         if (snaplen > res)
2212                 snaplen = res;
2213
2214         if (sk->sk_type == SOCK_DGRAM) {
2215                 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2216                                   po->tp_reserve;
2217         } else {
2218                 unsigned int maclen = skb_network_offset(skb);
2219                 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2220                                        (maclen < 16 ? 16 : maclen)) +
2221                                        po->tp_reserve;
2222                 if (po->has_vnet_hdr) {
2223                         netoff += sizeof(struct virtio_net_hdr);
2224                         do_vnet = true;
2225                 }
2226                 macoff = netoff - maclen;
2227         }
2228         if (po->tp_version <= TPACKET_V2) {
2229                 if (macoff + snaplen > po->rx_ring.frame_size) {
2230                         if (po->copy_thresh &&
2231                             atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2232                                 if (skb_shared(skb)) {
2233                                         copy_skb = skb_clone(skb, GFP_ATOMIC);
2234                                 } else {
2235                                         copy_skb = skb_get(skb);
2236                                         skb_head = skb->data;
2237                                 }
2238                                 if (copy_skb)
2239                                         skb_set_owner_r(copy_skb, sk);
2240                         }
2241                         snaplen = po->rx_ring.frame_size - macoff;
2242                         if ((int)snaplen < 0) {
2243                                 snaplen = 0;
2244                                 do_vnet = false;
2245                         }
2246                 }
2247         } else if (unlikely(macoff + snaplen >
2248                             GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2249                 u32 nval;
2250
2251                 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2252                 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2253                             snaplen, nval, macoff);
2254                 snaplen = nval;
2255                 if (unlikely((int)snaplen < 0)) {
2256                         snaplen = 0;
2257                         macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2258                         do_vnet = false;
2259                 }
2260         }
2261         spin_lock(&sk->sk_receive_queue.lock);
2262         h.raw = packet_current_rx_frame(po, skb,
2263                                         TP_STATUS_KERNEL, (macoff+snaplen));
2264         if (!h.raw)
2265                 goto drop_n_account;
2266         if (po->tp_version <= TPACKET_V2) {
2267                 packet_increment_rx_head(po, &po->rx_ring);
2268         /*
2269          * LOSING will be reported until you read the stats,
2270          * because it's COR - Clear On Read.
2271          * Anyway, moving it for V1/V2 only, as V3 doesn't need this
2272          * at packet level.
2273          */
2274                 if (po->stats.stats1.tp_drops)
2275                         status |= TP_STATUS_LOSING;
2276         }
2277
2278         if (do_vnet &&
2279             virtio_net_hdr_from_skb(skb, h.raw + macoff -
2280                                     sizeof(struct virtio_net_hdr),
2281                                     vio_le(), true, 0))
2282                 goto drop_n_account;
2283
2284         po->stats.stats1.tp_packets++;
2285         if (copy_skb) {
2286                 status |= TP_STATUS_COPY;
2287                 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2288         }
2289         spin_unlock(&sk->sk_receive_queue.lock);
2290
2291         skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2292
2293         if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2294                 getnstimeofday(&ts);
2295
2296         status |= ts_status;
2297
2298         switch (po->tp_version) {
2299         case TPACKET_V1:
2300                 h.h1->tp_len = skb->len;
2301                 h.h1->tp_snaplen = snaplen;
2302                 h.h1->tp_mac = macoff;
2303                 h.h1->tp_net = netoff;
2304                 h.h1->tp_sec = ts.tv_sec;
2305                 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2306                 hdrlen = sizeof(*h.h1);
2307                 break;
2308         case TPACKET_V2:
2309                 h.h2->tp_len = skb->len;
2310                 h.h2->tp_snaplen = snaplen;
2311                 h.h2->tp_mac = macoff;
2312                 h.h2->tp_net = netoff;
2313                 h.h2->tp_sec = ts.tv_sec;
2314                 h.h2->tp_nsec = ts.tv_nsec;
2315                 if (skb_vlan_tag_present(skb)) {
2316                         h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2317                         h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2318                         status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2319                 } else {
2320                         h.h2->tp_vlan_tci = 0;
2321                         h.h2->tp_vlan_tpid = 0;
2322                 }
2323                 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2324                 hdrlen = sizeof(*h.h2);
2325                 break;
2326         case TPACKET_V3:
2327                 /* tp_next_offset and vlan are already populated above,
2328                  * so don't clear those fields here.
2329                  */
2330                 h.h3->tp_status |= status;
2331                 h.h3->tp_len = skb->len;
2332                 h.h3->tp_snaplen = snaplen;
2333                 h.h3->tp_mac = macoff;
2334                 h.h3->tp_net = netoff;
2335                 h.h3->tp_sec  = ts.tv_sec;
2336                 h.h3->tp_nsec = ts.tv_nsec;
2337                 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2338                 hdrlen = sizeof(*h.h3);
2339                 break;
2340         default:
2341                 BUG();
2342         }
2343
2344         sll = h.raw + TPACKET_ALIGN(hdrlen);
2345         sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2346         sll->sll_family = AF_PACKET;
2347         sll->sll_hatype = dev->type;
2348         sll->sll_protocol = skb->protocol;
2349         sll->sll_pkttype = skb->pkt_type;
2350         if (unlikely(po->origdev))
2351                 sll->sll_ifindex = orig_dev->ifindex;
2352         else
2353                 sll->sll_ifindex = dev->ifindex;
2354
2355         smp_mb();
2356
2357 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2358         if (po->tp_version <= TPACKET_V2) {
2359                 u8 *start, *end;
2360
2361                 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2362                                         macoff + snaplen);
2363
2364                 for (start = h.raw; start < end; start += PAGE_SIZE)
2365                         flush_dcache_page(pgv_to_page(start));
2366         }
2367         smp_wmb();
2368 #endif
2369
2370         if (po->tp_version <= TPACKET_V2) {
2371                 __packet_set_status(po, h.raw, status);
2372                 sk->sk_data_ready(sk);
2373         } else {
2374                 prb_clear_blk_fill_status(&po->rx_ring);
2375         }
2376
2377 drop_n_restore:
2378         if (skb_head != skb->data && skb_shared(skb)) {
2379                 skb->data = skb_head;
2380                 skb->len = skb_len;
2381         }
2382 drop:
2383         if (!is_drop_n_account)
2384                 consume_skb(skb);
2385         else
2386                 kfree_skb(skb);
2387         return 0;
2388
2389 drop_n_account:
2390         is_drop_n_account = true;
2391         po->stats.stats1.tp_drops++;
2392         spin_unlock(&sk->sk_receive_queue.lock);
2393
2394         sk->sk_data_ready(sk);
2395         kfree_skb(copy_skb);
2396         goto drop_n_restore;
2397 }
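/* Usage sketch (userspace, illustrative): consuming the TPACKET_V2 RX
 * ring filled by tpacket_rcv().  Frames whose tp_status has
 * TP_STATUS_USER set belong to user space; writing TP_STATUS_KERNEL hands
 * them back.  Memory barriers, poll() and ring walking are omitted.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096, .tp_block_nr = 64,
 *		.tp_frame_size = 2048, .tp_frame_nr  = 128,
 *	};
 *	int v = TPACKET_V2;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket2_hdr *hdr = ring;		// first frame
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// frame data starts at (char *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// release frame
 *	}
 */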
2398
2399 static void tpacket_destruct_skb(struct sk_buff *skb)
2400 {
2401         struct packet_sock *po = pkt_sk(skb->sk);
2402
2403         if (likely(po->tx_ring.pg_vec)) {
2404                 void *ph;
2405                 __u32 ts;
2406
2407                 ph = skb_zcopy_get_nouarg(skb);
2408                 packet_dec_pending(&po->tx_ring);
2409
2410                 ts = __packet_set_timestamp(po, ph, skb);
2411                 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2412         }
2413
2414         sock_wfree(skb);
2415 }
2416
2417 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2418 {
2419         if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2420             (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2421              __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2422               __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2423                 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2424                          __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2425                         __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2426
2427         if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2428                 return -EINVAL;
2429
2430         return 0;
2431 }
2432
2433 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2434                                  struct virtio_net_hdr *vnet_hdr)
2435 {
2436         if (*len < sizeof(*vnet_hdr))
2437                 return -EINVAL;
2438         *len -= sizeof(*vnet_hdr);
2439
2440         if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2441                 return -EFAULT;
2442
2443         return __packet_snd_vnet_parse(vnet_hdr, *len);
2444 }
2445
2446 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2447                 void *frame, struct net_device *dev, void *data, int tp_len,
2448                 __be16 proto, unsigned char *addr, int hlen, int copylen,
2449                 const struct sockcm_cookie *sockc)
2450 {
2451         union tpacket_uhdr ph;
2452         int to_write, offset, len, nr_frags, len_max;
2453         struct socket *sock = po->sk.sk_socket;
2454         struct page *page;
2455         int err;
2456
2457         ph.raw = frame;
2458
2459         skb->protocol = proto;
2460         skb->dev = dev;
2461         skb->priority = po->sk.sk_priority;
2462         skb->mark = po->sk.sk_mark;
2463         skb->tstamp = sockc->transmit_time;
2464         skb_setup_tx_timestamp(skb, sockc->tsflags);
2465         skb_zcopy_set_nouarg(skb, ph.raw);
2466
2467         skb_reserve(skb, hlen);
2468         skb_reset_network_header(skb);
2469
2470         to_write = tp_len;
2471
2472         if (sock->type == SOCK_DGRAM) {
2473                 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2474                                 NULL, tp_len);
2475                 if (unlikely(err < 0))
2476                         return -EINVAL;
2477         } else if (copylen) {
2478                 int hdrlen = min_t(int, copylen, tp_len);
2479
2480                 skb_push(skb, dev->hard_header_len);
2481                 skb_put(skb, copylen - dev->hard_header_len);
2482                 err = skb_store_bits(skb, 0, data, hdrlen);
2483                 if (unlikely(err))
2484                         return err;
2485                 if (!dev_validate_header(dev, skb->data, hdrlen))
2486                         return -EINVAL;
2487
2488                 data += hdrlen;
2489                 to_write -= hdrlen;
2490         }
2491
2492         offset = offset_in_page(data);
2493         len_max = PAGE_SIZE - offset;
2494         len = ((to_write > len_max) ? len_max : to_write);
2495
2496         skb->data_len = to_write;
2497         skb->len += to_write;
2498         skb->truesize += to_write;
2499         refcount_add(to_write, &po->sk.sk_wmem_alloc);
2500
2501         while (likely(to_write)) {
2502                 nr_frags = skb_shinfo(skb)->nr_frags;
2503
2504                 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2505                         pr_err("Packet exceeds the number of skb frags (%lu)\n",
2506                                MAX_SKB_FRAGS);
2507                         return -EFAULT;
2508                 }
2509
2510                 page = pgv_to_page(data);
2511                 data += len;
2512                 flush_dcache_page(page);
2513                 get_page(page);
2514                 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2515                 to_write -= len;
2516                 offset = 0;
2517                 len_max = PAGE_SIZE;
2518                 len = ((to_write > len_max) ? len_max : to_write);
2519         }
2520
2521         packet_parse_headers(skb, sock);
2522
2523         return tp_len;
2524 }
2525
2526 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2527                                 int size_max, void **data)
2528 {
2529         union tpacket_uhdr ph;
2530         int tp_len, off;
2531
2532         ph.raw = frame;
2533
2534         switch (po->tp_version) {
2535         case TPACKET_V3:
2536                 if (ph.h3->tp_next_offset != 0) {
2537                         pr_warn_once("variable sized slot not supported\n");
2538                         return -EINVAL;
2539                 }
2540                 tp_len = ph.h3->tp_len;
2541                 break;
2542         case TPACKET_V2:
2543                 tp_len = ph.h2->tp_len;
2544                 break;
2545         default:
2546                 tp_len = ph.h1->tp_len;
2547                 break;
2548         }
2549         if (unlikely(tp_len > size_max)) {
2550                 pr_err("packet is too long (%d > %d)\n", tp_len, size_max);
2551                 return -EMSGSIZE;
2552         }
2553
2554         if (unlikely(po->tp_tx_has_off)) {
2555                 int off_min, off_max;
2556
2557                 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2558                 off_max = po->tx_ring.frame_size - tp_len;
2559                 if (po->sk.sk_type == SOCK_DGRAM) {
2560                         switch (po->tp_version) {
2561                         case TPACKET_V3:
2562                                 off = ph.h3->tp_net;
2563                                 break;
2564                         case TPACKET_V2:
2565                                 off = ph.h2->tp_net;
2566                                 break;
2567                         default:
2568                                 off = ph.h1->tp_net;
2569                                 break;
2570                         }
2571                 } else {
2572                         switch (po->tp_version) {
2573                         case TPACKET_V3:
2574                                 off = ph.h3->tp_mac;
2575                                 break;
2576                         case TPACKET_V2:
2577                                 off = ph.h2->tp_mac;
2578                                 break;
2579                         default:
2580                                 off = ph.h1->tp_mac;
2581                                 break;
2582                         }
2583                 }
2584                 if (unlikely((off < off_min) || (off_max < off)))
2585                         return -EINVAL;
2586         } else {
2587                 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2588         }
2589
2590         *data = frame + off;
2591         return tp_len;
2592 }
2593
2594 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2595 {
2596         struct sk_buff *skb;
2597         struct net_device *dev;
2598         struct virtio_net_hdr *vnet_hdr = NULL;
2599         struct sockcm_cookie sockc;
2600         __be16 proto;
2601         int err, reserve = 0;
2602         void *ph;
2603         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2604         bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2605         int tp_len, size_max;
2606         unsigned char *addr;
2607         void *data;
2608         int len_sum = 0;
2609         int status = TP_STATUS_AVAILABLE;
2610         int hlen, tlen, copylen = 0;
2611
2612         mutex_lock(&po->pg_vec_lock);
2613
2614         if (likely(saddr == NULL)) {
2615                 dev     = packet_cached_dev_get(po);
2616                 proto   = po->num;
2617                 addr    = NULL;
2618         } else {
2619                 err = -EINVAL;
2620                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2621                         goto out;
2622                 if (msg->msg_namelen < (saddr->sll_halen
2623                                         + offsetof(struct sockaddr_ll,
2624                                                 sll_addr)))
2625                         goto out;
2626                 proto   = saddr->sll_protocol;
2627                 addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
2628                 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2629                 if (addr && dev && saddr->sll_halen < dev->addr_len)
2630                         goto out_put;
2631         }
2632
2633         err = -ENXIO;
2634         if (unlikely(dev == NULL))
2635                 goto out;
2636         err = -ENETDOWN;
2637         if (unlikely(!(dev->flags & IFF_UP)))
2638                 goto out_put;
2639
2640         sockcm_init(&sockc, &po->sk);
2641         if (msg->msg_controllen) {
2642                 err = sock_cmsg_send(&po->sk, msg, &sockc);
2643                 if (unlikely(err))
2644                         goto out_put;
2645         }
2646
2647         if (po->sk.sk_socket->type == SOCK_RAW)
2648                 reserve = dev->hard_header_len;
2649         size_max = po->tx_ring.frame_size
2650                 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2651
2652         if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2653                 size_max = dev->mtu + reserve + VLAN_HLEN;
2654
2655         do {
2656                 ph = packet_current_frame(po, &po->tx_ring,
2657                                           TP_STATUS_SEND_REQUEST);
2658                 if (unlikely(ph == NULL)) {
2659                         if (need_wait && need_resched())
2660                                 schedule();
2661                         continue;
2662                 }
2663
2664                 skb = NULL;
2665                 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2666                 if (tp_len < 0)
2667                         goto tpacket_error;
2668
2669                 status = TP_STATUS_SEND_REQUEST;
2670                 hlen = LL_RESERVED_SPACE(dev);
2671                 tlen = dev->needed_tailroom;
2672                 if (po->has_vnet_hdr) {
2673                         vnet_hdr = data;
2674                         data += sizeof(*vnet_hdr);
2675                         tp_len -= sizeof(*vnet_hdr);
2676                         if (tp_len < 0 ||
2677                             __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2678                                 tp_len = -EINVAL;
2679                                 goto tpacket_error;
2680                         }
2681                         copylen = __virtio16_to_cpu(vio_le(),
2682                                                     vnet_hdr->hdr_len);
2683                 }
2684                 copylen = max_t(int, copylen, dev->hard_header_len);
2685                 skb = sock_alloc_send_skb(&po->sk,
2686                                 hlen + tlen + sizeof(struct sockaddr_ll) +
2687                                 (copylen - dev->hard_header_len),
2688                                 !need_wait, &err);
2689
2690                 if (unlikely(skb == NULL)) {
2691                         /* we assume the socket was initially writeable ... */
2692                         if (likely(len_sum > 0))
2693                                 err = len_sum;
2694                         goto out_status;
2695                 }
2696                 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2697                                           addr, hlen, copylen, &sockc);
2698                 if (likely(tp_len >= 0) &&
2699                     tp_len > dev->mtu + reserve &&
2700                     !po->has_vnet_hdr &&
2701                     !packet_extra_vlan_len_allowed(dev, skb))
2702                         tp_len = -EMSGSIZE;
2703
2704                 if (unlikely(tp_len < 0)) {
2705 tpacket_error:
2706                         if (po->tp_loss) {
2707                                 __packet_set_status(po, ph,
2708                                                 TP_STATUS_AVAILABLE);
2709                                 packet_increment_head(&po->tx_ring);
2710                                 kfree_skb(skb);
2711                                 continue;
2712                         } else {
2713                                 status = TP_STATUS_WRONG_FORMAT;
2714                                 err = tp_len;
2715                                 goto out_status;
2716                         }
2717                 }
2718
2719                 if (po->has_vnet_hdr) {
2720                         if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2721                                 tp_len = -EINVAL;
2722                                 goto tpacket_error;
2723                         }
2724                         virtio_net_hdr_set_proto(skb, vnet_hdr);
2725                 }
2726
2727                 skb->destructor = tpacket_destruct_skb;
2728                 __packet_set_status(po, ph, TP_STATUS_SENDING);
2729                 packet_inc_pending(&po->tx_ring);
2730
2731                 status = TP_STATUS_SEND_REQUEST;
2732                 err = po->xmit(skb);
2733                 if (unlikely(err > 0)) {
2734                         err = net_xmit_errno(err);
2735                         if (err && __packet_get_status(po, ph) ==
2736                                    TP_STATUS_AVAILABLE) {
2737                                 /* skb was destructed already */
2738                                 skb = NULL;
2739                                 goto out_status;
2740                         }
2741                         /*
2742                          * skb was dropped but not destructed yet;
2743                          * let's treat it like congestion or err < 0
2744                          */
2745                         err = 0;
2746                 }
2747                 packet_increment_head(&po->tx_ring);
2748                 len_sum += tp_len;
2749         } while (likely((ph != NULL) ||
2750                 /* Note: packet_read_pending() might be slow if we have
2751                  * to call it, as it's a per-cpu variable, but in the fast path
2752                  * we already short-circuit the loop with the first
2753                  * condition, and luckily don't have to go that path
2754                  * anyway.
2755                  */
2756                  (need_wait && packet_read_pending(&po->tx_ring))));
2757
2758         err = len_sum;
2759         goto out_put;
2760
2761 out_status:
2762         __packet_set_status(po, ph, status);
2763         kfree_skb(skb);
2764 out_put:
2765         dev_put(dev);
2766 out:
2767         mutex_unlock(&po->pg_vec_lock);
2768         return err;
2769 }
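/* Usage sketch (userspace, illustrative): driving the TX ring that
 * tpacket_snd() consumes.  User space claims a TP_STATUS_AVAILABLE frame,
 * copies the packet in after the header area, marks it
 * TP_STATUS_SEND_REQUEST and kicks the kernel with send().
 * next_tx_frame() is a hypothetical helper walking the mmap'ed ring.
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	struct tpacket2_hdr *hdr = next_tx_frame();
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		char *data = (char *)hdr + TPACKET2_HDRLEN -
 *			     sizeof(struct sockaddr_ll);
 *		memcpy(data, frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);	// or MSG_DONTWAIT
 *	}
 */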
2770
2771 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2772                                         size_t reserve, size_t len,
2773                                         size_t linear, int noblock,
2774                                         int *err)
2775 {
2776         struct sk_buff *skb;
2777
2778         /* Under a page?  Don't bother with paged skb. */
2779         if (prepad + len < PAGE_SIZE || !linear)
2780                 linear = len;
2781
2782         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2783                                    err, 0);
2784         if (!skb)
2785                 return NULL;
2786
2787         skb_reserve(skb, reserve);
2788         skb_put(skb, linear);
2789         skb->data_len = len - linear;
2790         skb->len += len - linear;
2791
2792         return skb;
2793 }
2794
2795 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2796 {
2797         struct sock *sk = sock->sk;
2798         DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2799         struct sk_buff *skb;
2800         struct net_device *dev;
2801         __be16 proto;
2802         unsigned char *addr;
2803         int err, reserve = 0;
2804         struct sockcm_cookie sockc;
2805         struct virtio_net_hdr vnet_hdr = { 0 };
2806         int offset = 0;
2807         struct packet_sock *po = pkt_sk(sk);
2808         bool has_vnet_hdr = false;
2809         int hlen, tlen, linear;
2810         int extra_len = 0;
2811
2812         /*
2813          *      Get and verify the address.
2814          */
2815
2816         if (likely(saddr == NULL)) {
2817                 dev     = packet_cached_dev_get(po);
2818                 proto   = po->num;
2819                 addr    = NULL;
2820         } else {
2821                 err = -EINVAL;
2822                 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2823                         goto out;
2824                 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2825                         goto out;
2826                 proto   = saddr->sll_protocol;
2827                 addr    = saddr->sll_halen ? saddr->sll_addr : NULL;
2828                 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2829                 if (addr && dev && saddr->sll_halen < dev->addr_len)
2830                         goto out_unlock;
2831         }
2832
2833         err = -ENXIO;
2834         if (unlikely(dev == NULL))
2835                 goto out_unlock;
2836         err = -ENETDOWN;
2837         if (unlikely(!(dev->flags & IFF_UP)))
2838                 goto out_unlock;
2839
2840         sockcm_init(&sockc, sk);
2841         sockc.mark = sk->sk_mark;
2842         if (msg->msg_controllen) {
2843                 err = sock_cmsg_send(sk, msg, &sockc);
2844                 if (unlikely(err))
2845                         goto out_unlock;
2846         }
2847
2848         if (sock->type == SOCK_RAW)
2849                 reserve = dev->hard_header_len;
2850         if (po->has_vnet_hdr) {
2851                 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2852                 if (err)
2853                         goto out_unlock;
2854                 has_vnet_hdr = true;
2855         }
2856
2857         if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2858                 if (!netif_supports_nofcs(dev)) {
2859                         err = -EPROTONOSUPPORT;
2860                         goto out_unlock;
2861                 }
2862                 extra_len = 4; /* We're doing our own CRC */
2863         }
2864
2865         err = -EMSGSIZE;
2866         if (!vnet_hdr.gso_type &&
2867             (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2868                 goto out_unlock;
2869
2870         err = -ENOBUFS;
2871         hlen = LL_RESERVED_SPACE(dev);
2872         tlen = dev->needed_tailroom;
2873         linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2874         linear = max(linear, min_t(int, len, dev->hard_header_len));
2875         skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2876                                msg->msg_flags & MSG_DONTWAIT, &err);
2877         if (skb == NULL)
2878                 goto out_unlock;
2879
2880         skb_reset_network_header(skb);
2881
2882         err = -EINVAL;
2883         if (sock->type == SOCK_DGRAM) {
2884                 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2885                 if (unlikely(offset < 0))
2886                         goto out_free;
2887         } else if (reserve) {
2888                 skb_reserve(skb, -reserve);
2889                 if (len < reserve + sizeof(struct ipv6hdr) &&
2890                     dev->min_header_len != dev->hard_header_len)
2891                         skb_reset_network_header(skb);
2892         }
2893
2894         /* Returns -EFAULT on error */
2895         err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2896         if (err)
2897                 goto out_free;
2898
2899         if (sock->type == SOCK_RAW &&
2900             !dev_validate_header(dev, skb->data, len)) {
2901                 err = -EINVAL;
2902                 goto out_free;
2903         }
2904
2905         skb_setup_tx_timestamp(skb, sockc.tsflags);
2906
2907         if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2908             !packet_extra_vlan_len_allowed(dev, skb)) {
2909                 err = -EMSGSIZE;
2910                 goto out_free;
2911         }
2912
2913         skb->protocol = proto;
2914         skb->dev = dev;
2915         skb->priority = sk->sk_priority;
2916         skb->mark = sockc.mark;
2917         skb->tstamp = sockc.transmit_time;
2918
2919         if (has_vnet_hdr) {
2920                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2921                 if (err)
2922                         goto out_free;
2923                 len += sizeof(vnet_hdr);
2924                 virtio_net_hdr_set_proto(skb, &vnet_hdr);
2925         }
2926
2927         packet_parse_headers(skb, sock);
2928
2929         if (unlikely(extra_len == 4))
2930                 skb->no_fcs = 1;
2931
2932         err = po->xmit(skb);
2933         if (err > 0 && (err = net_xmit_errno(err)) != 0)
2934                 goto out_unlock;
2935
2936         dev_put(dev);
2937
2938         return len;
2939
2940 out_free:
2941         kfree_skb(skb);
2942 out_unlock:
2943         if (dev)
2944                 dev_put(dev);
2945 out:
2946         return err;
2947 }
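/* Usage sketch (userspace, illustrative): the non-ring transmit path
 * above is reached by an ordinary sendto() with a sockaddr_ll naming the
 * egress interface.  On SOCK_RAW the buffer must start with the
 * link-layer header; on SOCK_DGRAM the kernel builds it from sll_addr via
 * dev_hard_header().  dst_mac and frame are placeholders.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */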
2948
2949 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2950 {
2951         struct sock *sk = sock->sk;
2952         struct packet_sock *po = pkt_sk(sk);
2953
2954         if (po->tx_ring.pg_vec)
2955                 return tpacket_snd(po, msg);
2956         else
2957                 return packet_snd(sock, msg, len);
2958 }
2959
2960 /*
2961  *      Close a PACKET socket. This is fairly simple. We immediately go
2962  *      to 'closed' state and remove our protocol entry in the device list.
2963  */
2964
2965 static int packet_release(struct socket *sock)
2966 {
2967         struct sock *sk = sock->sk;
2968         struct packet_sock *po;
2969         struct packet_fanout *f;
2970         struct net *net;
2971         union tpacket_req_u req_u;
2972
2973         if (!sk)
2974                 return 0;
2975
2976         net = sock_net(sk);
2977         po = pkt_sk(sk);
2978
2979         mutex_lock(&net->packet.sklist_lock);
2980         sk_del_node_init_rcu(sk);
2981         mutex_unlock(&net->packet.sklist_lock);
2982
2983         preempt_disable();
2984         sock_prot_inuse_add(net, sk->sk_prot, -1);
2985         preempt_enable();
2986
2987         spin_lock(&po->bind_lock);
2988         unregister_prot_hook(sk, false);
2989         packet_cached_dev_reset(po);
2990
2991         if (po->prot_hook.dev) {
2992                 dev_put(po->prot_hook.dev);
2993                 po->prot_hook.dev = NULL;
2994         }
2995         spin_unlock(&po->bind_lock);
2996
2997         packet_flush_mclist(sk);
2998
2999         lock_sock(sk);
3000         if (po->rx_ring.pg_vec) {
3001                 memset(&req_u, 0, sizeof(req_u));
3002                 packet_set_ring(sk, &req_u, 1, 0);
3003         }
3004
3005         if (po->tx_ring.pg_vec) {
3006                 memset(&req_u, 0, sizeof(req_u));
3007                 packet_set_ring(sk, &req_u, 1, 1);
3008         }
3009         release_sock(sk);
3010
3011         f = fanout_release(sk);
3012
3013         synchronize_net();
3014
3015         if (f) {
3016                 kfree(po->rollover);
3017                 fanout_release_data(f);
3018                 kfree(f);
3019         }
3020         /*
3021          *      Now the socket is dead. No more input will appear.
3022          */
3023         sock_orphan(sk);
3024         sock->sk = NULL;
3025
3026         /* Purge queues */
3027
3028         skb_queue_purge(&sk->sk_receive_queue);
3029         packet_free_pending(po);
3030         sk_refcnt_debug_release(sk);
3031
3032         sock_put(sk);
3033         return 0;
3034 }
3035
3036 /*
3037  *      Attach a packet hook.
3038  */
3039
3040 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3041                           __be16 proto)
3042 {
3043         struct packet_sock *po = pkt_sk(sk);
3044         struct net_device *dev_curr;
3045         __be16 proto_curr;
3046         bool need_rehook;
3047         struct net_device *dev = NULL;
3048         int ret = 0;
3049         bool unlisted = false;
3050
3051         lock_sock(sk);
3052         spin_lock(&po->bind_lock);
3053         rcu_read_lock();
3054
3055         if (po->fanout) {
3056                 ret = -EINVAL;
3057                 goto out_unlock;
3058         }
3059
3060         if (name) {
3061                 dev = dev_get_by_name_rcu(sock_net(sk), name);
3062                 if (!dev) {
3063                         ret = -ENODEV;
3064                         goto out_unlock;
3065                 }
3066         } else if (ifindex) {
3067                 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3068                 if (!dev) {
3069                         ret = -ENODEV;
3070                         goto out_unlock;
3071                 }
3072         }
3073
3074         if (dev)
3075                 dev_hold(dev);
3076
3077         proto_curr = po->prot_hook.type;
3078         dev_curr = po->prot_hook.dev;
3079
3080         need_rehook = proto_curr != proto || dev_curr != dev;
3081
3082         if (need_rehook) {
3083                 if (po->running) {
3084                         rcu_read_unlock();
3085                         /* prevents packet_notifier() from calling
3086                          * register_prot_hook()
3087                          */
3088                         po->num = 0;
3089                         __unregister_prot_hook(sk, true);
3090                         rcu_read_lock();
3091                         dev_curr = po->prot_hook.dev;
3092                         if (dev)
3093                                 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3094                                                                  dev->ifindex);
3095                 }
3096
3097                 BUG_ON(po->running);
3098                 po->num = proto;
3099                 po->prot_hook.type = proto;
3100
3101                 if (unlikely(unlisted)) {
3102                         dev_put(dev);
3103                         po->prot_hook.dev = NULL;
3104                         po->ifindex = -1;
3105                         packet_cached_dev_reset(po);
3106                 } else {
3107                         po->prot_hook.dev = dev;
3108                         po->ifindex = dev ? dev->ifindex : 0;
3109                         packet_cached_dev_assign(po, dev);
3110                 }
3111         }
3112         if (dev_curr)
3113                 dev_put(dev_curr);
3114
3115         if (proto == 0 || !need_rehook)
3116                 goto out_unlock;
3117
3118         if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3119                 register_prot_hook(sk);
3120         } else {
3121                 sk->sk_err = ENETDOWN;
3122                 if (!sock_flag(sk, SOCK_DEAD))
3123                         sk->sk_error_report(sk);
3124         }
3125
3126 out_unlock:
3127         rcu_read_unlock();
3128         spin_unlock(&po->bind_lock);
3129         release_sock(sk);
3130         return ret;
3131 }
3132
3133 /*
3134  *      Bind a packet socket to a device
3135  */
3136
3137 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3138                             int addr_len)
3139 {
3140         struct sock *sk = sock->sk;
3141         char name[sizeof(uaddr->sa_data) + 1];
3142
3143         /*
3144          *      Check legality
3145          */
3146
3147         if (addr_len != sizeof(struct sockaddr))
3148                 return -EINVAL;
3149         /* uaddr->sa_data comes from userspace; it is not guaranteed to be
3150          * NUL-terminated.
3151          */
3152         memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3153         name[sizeof(uaddr->sa_data)] = 0;
3154
3155         return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3156 }
3157
3158 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3159 {
3160         struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3161         struct sock *sk = sock->sk;
3162
3163         /*
3164          *      Check legality
3165          */
3166
3167         if (addr_len < sizeof(struct sockaddr_ll))
3168                 return -EINVAL;
3169         if (sll->sll_family != AF_PACKET)
3170                 return -EINVAL;
3171
3172         return packet_do_bind(sk, NULL, sll->sll_ifindex,
3173                               sll->sll_protocol ? : pkt_sk(sk)->num);
3174 }
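
/*
 *	Usage sketch (userspace, illustrative names): binding a SOCK_RAW or
 *	SOCK_DGRAM packet socket to one interface. Per the ?: fallback above,
 *	an sll_protocol of 0 keeps whatever protocol the socket already has.
 *	"eth0" is illustrative; fd is an open AF_PACKET socket.
 *
 *		#include <net/if.h>		// if_nametoindex()
 *		#include <linux/if_packet.h>
 *		#include <linux/if_ether.h>
 *
 *		struct sockaddr_ll sll = {0};
 *		sll.sll_family   = AF_PACKET;
 *		sll.sll_protocol = htons(ETH_P_ALL);	// or 0 to keep current
 *		sll.sll_ifindex  = if_nametoindex("eth0");
 *		if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
 *			perror("bind");
 */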
3175
3176 static struct proto packet_proto = {
3177         .name     = "PACKET",
3178         .owner    = THIS_MODULE,
3179         .obj_size = sizeof(struct packet_sock),
3180 };
3181
3182 /*
3183  *      Create a packet of type SOCK_PACKET.
3184  */
3185
3186 static int packet_create(struct net *net, struct socket *sock, int protocol,
3187                          int kern)
3188 {
3189         struct sock *sk;
3190         struct packet_sock *po;
3191         __be16 proto = (__force __be16)protocol; /* weird, but documented */
3192         int err;
3193
3194         if (!ns_capable(net->user_ns, CAP_NET_RAW))
3195                 return -EPERM;
3196         if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3197             sock->type != SOCK_PACKET)
3198                 return -ESOCKTNOSUPPORT;
3199
3200         sock->state = SS_UNCONNECTED;
3201
3202         err = -ENOBUFS;
3203         sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3204         if (sk == NULL)
3205                 goto out;
3206
3207         sock->ops = &packet_ops;
3208         if (sock->type == SOCK_PACKET)
3209                 sock->ops = &packet_ops_spkt;
3210
3211         sock_init_data(sock, sk);
3212
3213         po = pkt_sk(sk);
3214         sk->sk_family = PF_PACKET;
3215         po->num = proto;
3216         po->xmit = dev_queue_xmit;
3217
3218         err = packet_alloc_pending(po);
3219         if (err)
3220                 goto out2;
3221
3222         packet_cached_dev_reset(po);
3223
3224         sk->sk_destruct = packet_sock_destruct;
3225         sk_refcnt_debug_inc(sk);
3226
3227         /*
3228          *      Attach a protocol block
3229          */
3230
3231         spin_lock_init(&po->bind_lock);
3232         mutex_init(&po->pg_vec_lock);
3233         po->rollover = NULL;
3234         po->prot_hook.func = packet_rcv;
3235
3236         if (sock->type == SOCK_PACKET)
3237                 po->prot_hook.func = packet_rcv_spkt;
3238
3239         po->prot_hook.af_packet_priv = sk;
3240
3241         if (proto) {
3242                 po->prot_hook.type = proto;
3243                 __register_prot_hook(sk);
3244         }
3245
3246         mutex_lock(&net->packet.sklist_lock);
3247         sk_add_node_tail_rcu(sk, &net->packet.sklist);
3248         mutex_unlock(&net->packet.sklist_lock);
3249
3250         preempt_disable();
3251         sock_prot_inuse_add(net, &packet_proto, 1);
3252         preempt_enable();
3253
3254         return 0;
3255 out2:
3256         sk_free(sk);
3257 out:
3258         return err;
3259 }
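
/*
 *	Usage sketch (userspace): the socket(2) calls that reach
 *	packet_create(). CAP_NET_RAW is required, and the protocol argument
 *	travels in network byte order; ETH_P_ALL here is illustrative.
 *
 *		int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *		int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 *	A protocol of 0 is also valid: per the if (proto) above, no protocol
 *	hook is registered, so no frames arrive until bind() supplies one.
 */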
3260
3261 /*
3262  *      Pull a packet from our receive queue and hand it to the user.
3263  *      If necessary we block.
3264  */
3265
3266 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3267                           int flags)
3268 {
3269         struct sock *sk = sock->sk;
3270         struct sk_buff *skb;
3271         int copied, err;
3272         int vnet_hdr_len = 0;
3273         unsigned int origlen = 0;
3274
3275         err = -EINVAL;
3276         if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3277                 goto out;
3278
3279 #if 0
3280         /* What error should we return now? EUNATTACH? */
3281         if (pkt_sk(sk)->ifindex < 0)
3282                 return -ENODEV;
3283 #endif
3284
3285         if (flags & MSG_ERRQUEUE) {
3286                 err = sock_recv_errqueue(sk, msg, len,
3287                                          SOL_PACKET, PACKET_TX_TIMESTAMP);
3288                 goto out;
3289         }
3290
3291         /*
3292          *      Call the generic datagram receiver. This handles all sorts
3293          *      of horrible races and re-entrancy so we can forget about it
3294          *      in the protocol layers.
3295          *
3296          *      Now it will return ENETDOWN if the device has just gone down,
3297          *      but then it will block.
3298          */
3299
3300         skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3301
3302         /*
3303          *      If an error occurred, return it. Because skb_recv_datagram()
3304          *      handles the blocking, we don't need to see or worry about
3305          *      blocking retries.
3306          */
3307
3308         if (skb == NULL)
3309                 goto out;
3310
3311         if (pkt_sk(sk)->pressure)
3312                 packet_rcv_has_room(pkt_sk(sk), NULL);
3313
3314         if (pkt_sk(sk)->has_vnet_hdr) {
3315                 err = packet_rcv_vnet(msg, skb, &len);
3316                 if (err)
3317                         goto out_free;
3318                 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3319         }
3320
3321         /* Any data beyond the buffer you gave is lost. If that worries
3322          * a user program, it can always ask the device for its MTU
3323          * anyway.
3324          */
3325         copied = skb->len;
3326         if (copied > len) {
3327                 copied = len;
3328                 msg->msg_flags |= MSG_TRUNC;
3329         }
3330
3331         err = skb_copy_datagram_msg(skb, 0, msg, copied);
3332         if (err)
3333                 goto out_free;
3334
3335         if (sock->type != SOCK_PACKET) {
3336                 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3337
3338                 /* Original length was stored in sockaddr_ll fields */
3339                 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3340                 sll->sll_family = AF_PACKET;
3341                 sll->sll_protocol = skb->protocol;
3342         }
3343
3344         sock_recv_ts_and_drops(msg, sk, skb);
3345
3346         if (msg->msg_name) {
3347                 int copy_len;
3348
3349                 /* If the address length field is there to be filled
3350                  * in, we fill it in now.
3351                  */
3352                 if (sock->type == SOCK_PACKET) {
3353                         __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3354                         msg->msg_namelen = sizeof(struct sockaddr_pkt);
3355                         copy_len = msg->msg_namelen;
3356                 } else {
3357                         struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3358
3359                         msg->msg_namelen = sll->sll_halen +
3360                                 offsetof(struct sockaddr_ll, sll_addr);
3361                         copy_len = msg->msg_namelen;
3362                         if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3363                                 memset(msg->msg_name +
3364                                        offsetof(struct sockaddr_ll, sll_addr),
3365                                        0, sizeof(sll->sll_addr));
3366                                 msg->msg_namelen = sizeof(struct sockaddr_ll);
3367                         }
3368                 }
3369                 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3370         }
3371
3372         if (pkt_sk(sk)->auxdata) {
3373                 struct tpacket_auxdata aux;
3374
3375                 aux.tp_status = TP_STATUS_USER;
3376                 if (skb->ip_summed == CHECKSUM_PARTIAL)
3377                         aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3378                 else if (skb->pkt_type != PACKET_OUTGOING &&
3379                          (skb->ip_summed == CHECKSUM_COMPLETE ||
3380                           skb_csum_unnecessary(skb)))
3381                         aux.tp_status |= TP_STATUS_CSUM_VALID;
3382
3383                 aux.tp_len = origlen;
3384                 aux.tp_snaplen = skb->len;
3385                 aux.tp_mac = 0;
3386                 aux.tp_net = skb_network_offset(skb);
3387                 if (skb_vlan_tag_present(skb)) {
3388                         aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3389                         aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3390                         aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3391                 } else {
3392                         aux.tp_vlan_tci = 0;
3393                         aux.tp_vlan_tpid = 0;
3394                 }
3395                 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3396         }
3397
3398         /*
3399          *      Free or return the buffer as appropriate. Again this
3400          *      hides all the races and re-entrancy issues from us.
3401          */
3402         err = vnet_hdr_len + ((flags & MSG_TRUNC) ? skb->len : copied);
3403
3404 out_free:
3405         skb_free_datagram(sk, skb);
3406 out:
3407         return err;
3408 }
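
/*
 *	Usage sketch (userspace): receiving one frame together with its
 *	link-level source address. As fixed above, msg_namelen is now always
 *	at least sizeof(struct sockaddr_ll) for non-SOCK_PACKET sockets
 *	(short hardware addresses are zero-padded), so callers can safely
 *	pass in a full sockaddr_ll.
 *
 *		struct sockaddr_ll from;
 *		unsigned char buf[2048];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr mh = {
 *			.msg_name    = &from,
 *			.msg_namelen = sizeof(from),
 *			.msg_iov     = &iov,
 *			.msg_iovlen  = 1,
 *		};
 *		ssize_t n = recvmsg(fd, &mh, MSG_TRUNC);
 *		// with MSG_TRUNC, n is the full on-wire frame length
 */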
3409
3410 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3411                                int peer)
3412 {
3413         struct net_device *dev;
3414         struct sock *sk = sock->sk;
3415
3416         if (peer)
3417                 return -EOPNOTSUPP;
3418
3419         uaddr->sa_family = AF_PACKET;
3420         memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3421         rcu_read_lock();
3422         dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3423         if (dev)
3424                 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3425         rcu_read_unlock();
3426
3427         return sizeof(*uaddr);
3428 }
3429
3430 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3431                           int peer)
3432 {
3433         struct net_device *dev;
3434         struct sock *sk = sock->sk;
3435         struct packet_sock *po = pkt_sk(sk);
3436         DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3437
3438         if (peer)
3439                 return -EOPNOTSUPP;
3440
3441         sll->sll_family = AF_PACKET;
3442         sll->sll_ifindex = po->ifindex;
3443         sll->sll_protocol = po->num;
3444         sll->sll_pkttype = 0;
3445         rcu_read_lock();
3446         dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3447         if (dev) {
3448                 sll->sll_hatype = dev->type;
3449                 sll->sll_halen = dev->addr_len;
3450                 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3451         } else {
3452                 sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
3453                 sll->sll_halen = 0;
3454         }
3455         rcu_read_unlock();
3456
3457         return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3458 }
3459
3460 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3461                          int what)
3462 {
3463         switch (i->type) {
3464         case PACKET_MR_MULTICAST:
3465                 if (i->alen != dev->addr_len)
3466                         return -EINVAL;
3467                 if (what > 0)
3468                         return dev_mc_add(dev, i->addr);
3469                 else
3470                         return dev_mc_del(dev, i->addr);
3471                 break;
3472         case PACKET_MR_PROMISC:
3473                 return dev_set_promiscuity(dev, what);
3474         case PACKET_MR_ALLMULTI:
3475                 return dev_set_allmulti(dev, what);
3476         case PACKET_MR_UNICAST:
3477                 if (i->alen != dev->addr_len)
3478                         return -EINVAL;
3479                 if (what > 0)
3480                         return dev_uc_add(dev, i->addr);
3481                 else
3482                         return dev_uc_del(dev, i->addr);
3483                 break;
3484         default:
3485                 break;
3486         }
3487         return 0;
3488 }
3489
3490 static void packet_dev_mclist_delete(struct net_device *dev,
3491                                      struct packet_mclist **mlp)
3492 {
3493         struct packet_mclist *ml;
3494
3495         while ((ml = *mlp) != NULL) {
3496                 if (ml->ifindex == dev->ifindex) {
3497                         packet_dev_mc(dev, ml, -1);
3498                         *mlp = ml->next;
3499                         kfree(ml);
3500                 } else
3501                         mlp = &ml->next;
3502         }
3503 }
3504
3505 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3506 {
3507         struct packet_sock *po = pkt_sk(sk);
3508         struct packet_mclist *ml, *i;
3509         struct net_device *dev;
3510         int err;
3511
3512         rtnl_lock();
3513
3514         err = -ENODEV;
3515         dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3516         if (!dev)
3517                 goto done;
3518
3519         err = -EINVAL;
3520         if (mreq->mr_alen > dev->addr_len)
3521                 goto done;
3522
3523         err = -ENOBUFS;
3524         i = kmalloc(sizeof(*i), GFP_KERNEL);
3525         if (i == NULL)
3526                 goto done;
3527
3528         err = 0;
3529         for (ml = po->mclist; ml; ml = ml->next) {
3530                 if (ml->ifindex == mreq->mr_ifindex &&
3531                     ml->type == mreq->mr_type &&
3532                     ml->alen == mreq->mr_alen &&
3533                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3534                         ml->count++;
3535                         /* Free the new element ... */
3536                         kfree(i);
3537                         goto done;
3538                 }
3539         }
3540
3541         i->type = mreq->mr_type;
3542         i->ifindex = mreq->mr_ifindex;
3543         i->alen = mreq->mr_alen;
3544         memcpy(i->addr, mreq->mr_address, i->alen);
3545         memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3546         i->count = 1;
3547         i->next = po->mclist;
3548         po->mclist = i;
3549         err = packet_dev_mc(dev, i, 1);
3550         if (err) {
3551                 po->mclist = i->next;
3552                 kfree(i);
3553         }
3554
3555 done:
3556         rtnl_unlock();
3557         return err;
3558 }
3559
3560 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3561 {
3562         struct packet_mclist *ml, **mlp;
3563
3564         rtnl_lock();
3565
3566         for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3567                 if (ml->ifindex == mreq->mr_ifindex &&
3568                     ml->type == mreq->mr_type &&
3569                     ml->alen == mreq->mr_alen &&
3570                     memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3571                         if (--ml->count == 0) {
3572                                 struct net_device *dev;
3573                                 *mlp = ml->next;
3574                                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3575                                 if (dev)
3576                                         packet_dev_mc(dev, ml, -1);
3577                                 kfree(ml);
3578                         }
3579                         break;
3580                 }
3581         }
3582         rtnl_unlock();
3583         return 0;
3584 }
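
/*
 *	Usage sketch (userspace): the add/drop paths above are driven by the
 *	PACKET_ADD_MEMBERSHIP / PACKET_DROP_MEMBERSHIP socket options.
 *	Enabling promiscuous mode through this refcounted membership API,
 *	for example ("eth0" is illustrative):
 *
 *		struct packet_mreq mreq = {0};
 *		mreq.mr_ifindex = if_nametoindex("eth0");
 *		mreq.mr_type    = PACKET_MR_PROMISC;
 *		setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *			   &mreq, sizeof(mreq));
 */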
3585
3586 static void packet_flush_mclist(struct sock *sk)
3587 {
3588         struct packet_sock *po = pkt_sk(sk);
3589         struct packet_mclist *ml;
3590
3591         if (!po->mclist)
3592                 return;
3593
3594         rtnl_lock();
3595         while ((ml = po->mclist) != NULL) {
3596                 struct net_device *dev;
3597
3598                 po->mclist = ml->next;
3599                 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3600                 if (dev != NULL)
3601                         packet_dev_mc(dev, ml, -1);
3602                 kfree(ml);
3603         }
3604         rtnl_unlock();
3605 }
3606
3607 static int
3608 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3609 {
3610         struct sock *sk = sock->sk;
3611         struct packet_sock *po = pkt_sk(sk);
3612         int ret;
3613
3614         if (level != SOL_PACKET)
3615                 return -ENOPROTOOPT;
3616
3617         switch (optname) {
3618         case PACKET_ADD_MEMBERSHIP:
3619         case PACKET_DROP_MEMBERSHIP:
3620         {
3621                 struct packet_mreq_max mreq;
3622                 int len = optlen;
3623                 memset(&mreq, 0, sizeof(mreq));
3624                 if (len < sizeof(struct packet_mreq))
3625                         return -EINVAL;
3626                 if (len > sizeof(mreq))
3627                         len = sizeof(mreq);
3628                 if (copy_from_user(&mreq, optval, len))
3629                         return -EFAULT;
3630                 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3631                         return -EINVAL;
3632                 if (optname == PACKET_ADD_MEMBERSHIP)
3633                         ret = packet_mc_add(sk, &mreq);
3634                 else
3635                         ret = packet_mc_drop(sk, &mreq);
3636                 return ret;
3637         }
3638
3639         case PACKET_RX_RING:
3640         case PACKET_TX_RING:
3641         {
3642                 union tpacket_req_u req_u;
3643                 int len;
3644
3645                 lock_sock(sk);
3646                 switch (po->tp_version) {
3647                 case TPACKET_V1:
3648                 case TPACKET_V2:
3649                         len = sizeof(req_u.req);
3650                         break;
3651                 case TPACKET_V3:
3652                 default:
3653                         len = sizeof(req_u.req3);
3654                         break;
3655                 }
3656                 if (optlen < len) {
3657                         ret = -EINVAL;
3658                 } else {
3659                         if (copy_from_user(&req_u.req, optval, len))
3660                                 ret = -EFAULT;
3661                         else
3662                                 ret = packet_set_ring(sk, &req_u, 0,
3663                                                     optname == PACKET_TX_RING);
3664                 }
3665                 release_sock(sk);
3666                 return ret;
3667         }
3668         case PACKET_COPY_THRESH:
3669         {
3670                 int val;
3671
3672                 if (optlen != sizeof(val))
3673                         return -EINVAL;
3674                 if (copy_from_user(&val, optval, sizeof(val)))
3675                         return -EFAULT;
3676
3677                 pkt_sk(sk)->copy_thresh = val;
3678                 return 0;
3679         }
3680         case PACKET_VERSION:
3681         {
3682                 int val;
3683
3684                 if (optlen != sizeof(val))
3685                         return -EINVAL;
3686                 if (copy_from_user(&val, optval, sizeof(val)))
3687                         return -EFAULT;
3688                 switch (val) {
3689                 case TPACKET_V1:
3690                 case TPACKET_V2:
3691                 case TPACKET_V3:
3692                         break;
3693                 default:
3694                         return -EINVAL;
3695                 }
3696                 lock_sock(sk);
3697                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3698                         ret = -EBUSY;
3699                 } else {
3700                         po->tp_version = val;
3701                         ret = 0;
3702                 }
3703                 release_sock(sk);
3704                 return ret;
3705         }
3706         case PACKET_RESERVE:
3707         {
3708                 unsigned int val;
3709
3710                 if (optlen != sizeof(val))
3711                         return -EINVAL;
3712                 if (copy_from_user(&val, optval, sizeof(val)))
3713                         return -EFAULT;
3714                 if (val > INT_MAX)
3715                         return -EINVAL;
3716                 lock_sock(sk);
3717                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3718                         ret = -EBUSY;
3719                 } else {
3720                         po->tp_reserve = val;
3721                         ret = 0;
3722                 }
3723                 release_sock(sk);
3724                 return ret;
3725         }
3726         case PACKET_LOSS:
3727         {
3728                 unsigned int val;
3729
3730                 if (optlen != sizeof(val))
3731                         return -EINVAL;
3732                 if (copy_from_user(&val, optval, sizeof(val)))
3733                         return -EFAULT;
3734
3735                 lock_sock(sk);
3736                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3737                         ret = -EBUSY;
3738                 } else {
3739                         po->tp_loss = !!val;
3740                         ret = 0;
3741                 }
3742                 release_sock(sk);
3743                 return ret;
3744         }
3745         case PACKET_AUXDATA:
3746         {
3747                 int val;
3748
3749                 if (optlen < sizeof(val))
3750                         return -EINVAL;
3751                 if (copy_from_user(&val, optval, sizeof(val)))
3752                         return -EFAULT;
3753
3754                 lock_sock(sk);
3755                 po->auxdata = !!val;
3756                 release_sock(sk);
3757                 return 0;
3758         }
3759         case PACKET_ORIGDEV:
3760         {
3761                 int val;
3762
3763                 if (optlen < sizeof(val))
3764                         return -EINVAL;
3765                 if (copy_from_user(&val, optval, sizeof(val)))
3766                         return -EFAULT;
3767
3768                 lock_sock(sk);
3769                 po->origdev = !!val;
3770                 release_sock(sk);
3771                 return 0;
3772         }
3773         case PACKET_VNET_HDR:
3774         {
3775                 int val;
3776
3777                 if (sock->type != SOCK_RAW)
3778                         return -EINVAL;
3779                 if (optlen < sizeof(val))
3780                         return -EINVAL;
3781                 if (copy_from_user(&val, optval, sizeof(val)))
3782                         return -EFAULT;
3783
3784                 lock_sock(sk);
3785                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3786                         ret = -EBUSY;
3787                 } else {
3788                         po->has_vnet_hdr = !!val;
3789                         ret = 0;
3790                 }
3791                 release_sock(sk);
3792                 return ret;
3793         }
3794         case PACKET_TIMESTAMP:
3795         {
3796                 int val;
3797
3798                 if (optlen != sizeof(val))
3799                         return -EINVAL;
3800                 if (copy_from_user(&val, optval, sizeof(val)))
3801                         return -EFAULT;
3802
3803                 po->tp_tstamp = val;
3804                 return 0;
3805         }
3806         case PACKET_FANOUT:
3807         {
3808                 int val;
3809
3810                 if (optlen != sizeof(val))
3811                         return -EINVAL;
3812                 if (copy_from_user(&val, optval, sizeof(val)))
3813                         return -EFAULT;
3814
3815                 return fanout_add(sk, val & 0xffff, val >> 16);
3816         }
3817         case PACKET_FANOUT_DATA:
3818         {
3819                 if (!po->fanout)
3820                         return -EINVAL;
3821
3822                 return fanout_set_data(po, optval, optlen);
3823         }
3824         case PACKET_IGNORE_OUTGOING:
3825         {
3826                 int val;
3827
3828                 if (optlen != sizeof(val))
3829                         return -EINVAL;
3830                 if (copy_from_user(&val, optval, sizeof(val)))
3831                         return -EFAULT;
3832                 if (val < 0 || val > 1)
3833                         return -EINVAL;
3834
3835                 po->prot_hook.ignore_outgoing = !!val;
3836                 return 0;
3837         }
3838         case PACKET_TX_HAS_OFF:
3839         {
3840                 unsigned int val;
3841
3842                 if (optlen != sizeof(val))
3843                         return -EINVAL;
3844                 if (copy_from_user(&val, optval, sizeof(val)))
3845                         return -EFAULT;
3846
3847                 lock_sock(sk);
3848                 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3849                         ret = -EBUSY;
3850                 } else {
3851                         po->tp_tx_has_off = !!val;
3852                         ret = 0;
3853                 }
3854                 release_sock(sk);
3855                 return ret;
3856         }
3857         case PACKET_QDISC_BYPASS:
3858         {
3859                 int val;
3860
3861                 if (optlen != sizeof(val))
3862                         return -EINVAL;
3863                 if (copy_from_user(&val, optval, sizeof(val)))
3864                         return -EFAULT;
3865
3866                 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3867                 return 0;
3868         }
3869         default:
3870                 return -ENOPROTOOPT;
3871         }
3872 }
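
/*
 *	Usage sketch (userspace, illustrative sizes): configuring a
 *	TPACKET_V3 RX ring. PACKET_VERSION must come first, because the
 *	version chosen above decides how many bytes of tpacket_req_u the
 *	PACKET_RX_RING handler copies in.
 *
 *		int ver = TPACKET_V3;
 *		setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *		struct tpacket_req3 req = {0};
 *		req.tp_block_size     = 1 << 22;	// 4 MiB, page aligned
 *		req.tp_block_nr       = 64;
 *		req.tp_frame_size     = 1 << 11;	// multiple of TPACKET_ALIGNMENT
 *		req.tp_frame_nr       = (req.tp_block_size / req.tp_frame_size)
 *					* req.tp_block_nr;
 *		req.tp_retire_blk_tov = 60;		// block timeout, ms
 *		setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */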
3873
3874 static int packet_getsockopt(struct socket *sock, int level, int optname,
3875                              char __user *optval, int __user *optlen)
3876 {
3877         int len;
3878         int val, lv = sizeof(val);
3879         struct sock *sk = sock->sk;
3880         struct packet_sock *po = pkt_sk(sk);
3881         void *data = &val;
3882         union tpacket_stats_u st;
3883         struct tpacket_rollover_stats rstats;
3884
3885         if (level != SOL_PACKET)
3886                 return -ENOPROTOOPT;
3887
3888         if (get_user(len, optlen))
3889                 return -EFAULT;
3890
3891         if (len < 0)
3892                 return -EINVAL;
3893
3894         switch (optname) {
3895         case PACKET_STATISTICS:
3896                 spin_lock_bh(&sk->sk_receive_queue.lock);
3897                 memcpy(&st, &po->stats, sizeof(st));
3898                 memset(&po->stats, 0, sizeof(po->stats));
3899                 spin_unlock_bh(&sk->sk_receive_queue.lock);
3900
3901                 if (po->tp_version == TPACKET_V3) {
3902                         lv = sizeof(struct tpacket_stats_v3);
3903                         st.stats3.tp_packets += st.stats3.tp_drops;
3904                         data = &st.stats3;
3905                 } else {
3906                         lv = sizeof(struct tpacket_stats);
3907                         st.stats1.tp_packets += st.stats1.tp_drops;
3908                         data = &st.stats1;
3909                 }
3910
3911                 break;
3912         case PACKET_AUXDATA:
3913                 val = po->auxdata;
3914                 break;
3915         case PACKET_ORIGDEV:
3916                 val = po->origdev;
3917                 break;
3918         case PACKET_VNET_HDR:
3919                 val = po->has_vnet_hdr;
3920                 break;
3921         case PACKET_VERSION:
3922                 val = po->tp_version;
3923                 break;
3924         case PACKET_HDRLEN:
3925                 if (len > sizeof(int))
3926                         len = sizeof(int);
3927                 if (len < sizeof(int))
3928                         return -EINVAL;
3929                 if (copy_from_user(&val, optval, len))
3930                         return -EFAULT;
3931                 switch (val) {
3932                 case TPACKET_V1:
3933                         val = sizeof(struct tpacket_hdr);
3934                         break;
3935                 case TPACKET_V2:
3936                         val = sizeof(struct tpacket2_hdr);
3937                         break;
3938                 case TPACKET_V3:
3939                         val = sizeof(struct tpacket3_hdr);
3940                         break;
3941                 default:
3942                         return -EINVAL;
3943                 }
3944                 break;
3945         case PACKET_RESERVE:
3946                 val = po->tp_reserve;
3947                 break;
3948         case PACKET_LOSS:
3949                 val = po->tp_loss;
3950                 break;
3951         case PACKET_TIMESTAMP:
3952                 val = po->tp_tstamp;
3953                 break;
3954         case PACKET_FANOUT:
3955                 val = (po->fanout ?
3956                        ((u32)po->fanout->id |
3957                         ((u32)po->fanout->type << 16) |
3958                         ((u32)po->fanout->flags << 24)) :
3959                        0);
3960                 break;
3961         case PACKET_IGNORE_OUTGOING:
3962                 val = po->prot_hook.ignore_outgoing;
3963                 break;
3964         case PACKET_ROLLOVER_STATS:
3965                 if (!po->rollover)
3966                         return -EINVAL;
3967                 rstats.tp_all = atomic_long_read(&po->rollover->num);
3968                 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3969                 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3970                 data = &rstats;
3971                 lv = sizeof(rstats);
3972                 break;
3973         case PACKET_TX_HAS_OFF:
3974                 val = po->tp_tx_has_off;
3975                 break;
3976         case PACKET_QDISC_BYPASS:
3977                 val = packet_use_direct_xmit(po);
3978                 break;
3979         default:
3980                 return -ENOPROTOOPT;
3981         }
3982
3983         if (len > lv)
3984                 len = lv;
3985         if (put_user(len, optlen))
3986                 return -EFAULT;
3987         if (copy_to_user(optval, data, len))
3988                 return -EFAULT;
3989         return 0;
3990 }
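
/*
 *	Usage sketch (userspace): reading the drop counters. Note the
 *	read-and-reset semantics above: PACKET_STATISTICS zeroes the
 *	counters under the receive queue lock as it copies them out
 *	(tpacket_stats_v3 applies when the socket is TPACKET_V3).
 *
 *		struct tpacket_stats st;
 *		socklen_t slen = sizeof(st);
 *		if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &slen) == 0)
 *			printf("%u packets, %u drops\n", st.tp_packets, st.tp_drops);
 */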
3991
3992
3993 #ifdef CONFIG_COMPAT
3994 static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3995                                     char __user *optval, unsigned int optlen)
3996 {
3997         struct packet_sock *po = pkt_sk(sock->sk);
3998
3999         if (level != SOL_PACKET)
4000                 return -ENOPROTOOPT;
4001
4002         if (optname == PACKET_FANOUT_DATA &&
4003             po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
4004                 optval = (char __user *)get_compat_bpf_fprog(optval);
4005                 if (!optval)
4006                         return -EFAULT;
4007                 optlen = sizeof(struct sock_fprog);
4008         }
4009
4010         return packet_setsockopt(sock, level, optname, optval, optlen);
4011 }
4012 #endif
4013
4014 static int packet_notifier(struct notifier_block *this,
4015                            unsigned long msg, void *ptr)
4016 {
4017         struct sock *sk;
4018         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4019         struct net *net = dev_net(dev);
4020
4021         rcu_read_lock();
4022         sk_for_each_rcu(sk, &net->packet.sklist) {
4023                 struct packet_sock *po = pkt_sk(sk);
4024
4025                 switch (msg) {
4026                 case NETDEV_UNREGISTER:
4027                         if (po->mclist)
4028                                 packet_dev_mclist_delete(dev, &po->mclist);
4029                         /* fallthrough */
4030
4031                 case NETDEV_DOWN:
4032                         if (dev->ifindex == po->ifindex) {
4033                                 spin_lock(&po->bind_lock);
4034                                 if (po->running) {
4035                                         __unregister_prot_hook(sk, false);
4036                                         sk->sk_err = ENETDOWN;
4037                                         if (!sock_flag(sk, SOCK_DEAD))
4038                                                 sk->sk_error_report(sk);
4039                                 }
4040                                 if (msg == NETDEV_UNREGISTER) {
4041                                         packet_cached_dev_reset(po);
4042                                         po->ifindex = -1;
4043                                         if (po->prot_hook.dev)
4044                                                 dev_put(po->prot_hook.dev);
4045                                         po->prot_hook.dev = NULL;
4046                                 }
4047                                 spin_unlock(&po->bind_lock);
4048                         }
4049                         break;
4050                 case NETDEV_UP:
4051                         if (dev->ifindex == po->ifindex) {
4052                                 spin_lock(&po->bind_lock);
4053                                 if (po->num)
4054                                         register_prot_hook(sk);
4055                                 spin_unlock(&po->bind_lock);
4056                         }
4057                         break;
4058                 }
4059         }
4060         rcu_read_unlock();
4061         return NOTIFY_DONE;
4062 }
4063
4064
4065 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4066                         unsigned long arg)
4067 {
4068         struct sock *sk = sock->sk;
4069
4070         switch (cmd) {
4071         case SIOCOUTQ:
4072         {
4073                 int amount = sk_wmem_alloc_get(sk);
4074
4075                 return put_user(amount, (int __user *)arg);
4076         }
4077         case SIOCINQ:
4078         {
4079                 struct sk_buff *skb;
4080                 int amount = 0;
4081
4082                 spin_lock_bh(&sk->sk_receive_queue.lock);
4083                 skb = skb_peek(&sk->sk_receive_queue);
4084                 if (skb)
4085                         amount = skb->len;
4086                 spin_unlock_bh(&sk->sk_receive_queue.lock);
4087                 return put_user(amount, (int __user *)arg);
4088         }
4089         case SIOCGSTAMP:
4090                 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4091         case SIOCGSTAMPNS:
4092                 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4093
4094 #ifdef CONFIG_INET
4095         case SIOCADDRT:
4096         case SIOCDELRT:
4097         case SIOCDARP:
4098         case SIOCGARP:
4099         case SIOCSARP:
4100         case SIOCGIFADDR:
4101         case SIOCSIFADDR:
4102         case SIOCGIFBRDADDR:
4103         case SIOCSIFBRDADDR:
4104         case SIOCGIFNETMASK:
4105         case SIOCSIFNETMASK:
4106         case SIOCGIFDSTADDR:
4107         case SIOCSIFDSTADDR:
4108         case SIOCSIFFLAGS:
4109                 return inet_dgram_ops.ioctl(sock, cmd, arg);
4110 #endif
4111
4112         default:
4113                 return -ENOIOCTLCMD;
4114         }
4115         return 0;
4116 }
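
/*
 *	Usage sketch (userspace): because of the skb_peek() above, SIOCINQ
 *	on a packet socket reports the length of the next queued frame,
 *	not the total number of bytes queued.
 *
 *		int next_len = 0;
 *		ioctl(fd, SIOCINQ, &next_len);
 */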
4117
4118 static __poll_t packet_poll(struct file *file, struct socket *sock,
4119                                 poll_table *wait)
4120 {
4121         struct sock *sk = sock->sk;
4122         struct packet_sock *po = pkt_sk(sk);
4123         __poll_t mask = datagram_poll(file, sock, wait);
4124
4125         spin_lock_bh(&sk->sk_receive_queue.lock);
4126         if (po->rx_ring.pg_vec) {
4127                 if (!packet_previous_rx_frame(po, &po->rx_ring,
4128                         TP_STATUS_KERNEL))
4129                         mask |= EPOLLIN | EPOLLRDNORM;
4130         }
4131         if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4132                 po->pressure = 0;
4133         spin_unlock_bh(&sk->sk_receive_queue.lock);
4134         spin_lock_bh(&sk->sk_write_queue.lock);
4135         if (po->tx_ring.pg_vec) {
4136                 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4137                         mask |= EPOLLOUT | EPOLLWRNORM;
4138         }
4139         spin_unlock_bh(&sk->sk_write_queue.lock);
4140         return mask;
4141 }
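
/*
 *	Usage sketch (userspace): a typical RX ring reader blocks in poll(2)
 *	and then walks frames until it hits one still owned by the kernel,
 *	since EPOLLIN above is raised as soon as the slot before the ring
 *	head is no longer TP_STATUS_KERNEL.
 *
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };
 *		poll(&pfd, 1, -1);	// -1: wait indefinitely
 */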
4142
4143
4144 /* Dirty? Well, I still have not learned a better way to account
4145  * for user mmaps.
4146  */
4147
4148 static void packet_mm_open(struct vm_area_struct *vma)
4149 {
4150         struct file *file = vma->vm_file;
4151         struct socket *sock = file->private_data;
4152         struct sock *sk = sock->sk;
4153
4154         if (sk)
4155                 atomic_inc(&pkt_sk(sk)->mapped);
4156 }
4157
4158 static void packet_mm_close(struct vm_area_struct *vma)
4159 {
4160         struct file *file = vma->vm_file;
4161         struct socket *sock = file->private_data;
4162         struct sock *sk = sock->sk;
4163
4164         if (sk)
4165                 atomic_dec(&pkt_sk(sk)->mapped);
4166 }
4167
4168 static const struct vm_operations_struct packet_mmap_ops = {
4169         .open   =       packet_mm_open,
4170         .close  =       packet_mm_close,
4171 };
4172
4173 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4174                         unsigned int len)
4175 {
4176         int i;
4177
4178         for (i = 0; i < len; i++) {
4179                 if (likely(pg_vec[i].buffer)) {
4180                         if (is_vmalloc_addr(pg_vec[i].buffer))
4181                                 vfree(pg_vec[i].buffer);
4182                         else
4183                                 free_pages((unsigned long)pg_vec[i].buffer,
4184                                            order);
4185                         pg_vec[i].buffer = NULL;
4186                 }
4187         }
4188         kfree(pg_vec);
4189 }
4190
4191 static char *alloc_one_pg_vec_page(unsigned long order)
4192 {
4193         char *buffer;
4194         gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4195                           __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4196
4197         buffer = (char *) __get_free_pages(gfp_flags, order);
4198         if (buffer)
4199                 return buffer;
4200
4201         /* __get_free_pages failed, fall back to vmalloc */
4202         buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4203         if (buffer)
4204                 return buffer;
4205
4206         /* vmalloc failed, let's dig into swap here */
4207         gfp_flags &= ~__GFP_NORETRY;
4208         buffer = (char *) __get_free_pages(gfp_flags, order);
4209         if (buffer)
4210                 return buffer;
4211
4212         /* complete and utter failure */
4213         return NULL;
4214 }
4215
4216 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4217 {
4218         unsigned int block_nr = req->tp_block_nr;
4219         struct pgv *pg_vec;
4220         int i;
4221
4222         pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4223         if (unlikely(!pg_vec))
4224                 goto out;
4225
4226         for (i = 0; i < block_nr; i++) {
4227                 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4228                 if (unlikely(!pg_vec[i].buffer))
4229                         goto out_free_pgvec;
4230         }
4231
4232 out:
4233         return pg_vec;
4234
4235 out_free_pgvec:
4236         free_pg_vec(pg_vec, order, block_nr);
4237         pg_vec = NULL;
4238         goto out;
4239 }
4240
4241 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4242                 int closing, int tx_ring)
4243 {
4244         struct pgv *pg_vec = NULL;
4245         struct packet_sock *po = pkt_sk(sk);
4246         int was_running, order = 0;
4247         struct packet_ring_buffer *rb;
4248         struct sk_buff_head *rb_queue;
4249         __be16 num;
4250         int err = -EINVAL;
4251         /* Alias added to keep code churn minimal */
4252         struct tpacket_req *req = &req_u->req;
4253
4254         rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4255         rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4256
4257         err = -EBUSY;
4258         if (!closing) {
4259                 if (atomic_read(&po->mapped))
4260                         goto out;
4261                 if (packet_read_pending(rb))
4262                         goto out;
4263         }
4264
4265         if (req->tp_block_nr) {
4266                 unsigned int min_frame_size;
4267
4268                 /* Sanity tests and some calculations */
4269                 err = -EBUSY;
4270                 if (unlikely(rb->pg_vec))
4271                         goto out;
4272
4273                 switch (po->tp_version) {
4274                 case TPACKET_V1:
4275                         po->tp_hdrlen = TPACKET_HDRLEN;
4276                         break;
4277                 case TPACKET_V2:
4278                         po->tp_hdrlen = TPACKET2_HDRLEN;
4279                         break;
4280                 case TPACKET_V3:
4281                         po->tp_hdrlen = TPACKET3_HDRLEN;
4282                         break;
4283                 }
4284
4285                 err = -EINVAL;
4286                 if (unlikely((int)req->tp_block_size <= 0))
4287                         goto out;
4288                 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4289                         goto out;
4290                 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4291                 if (po->tp_version >= TPACKET_V3 &&
4292                     req->tp_block_size <
4293                     BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4294                         goto out;
4295                 if (unlikely(req->tp_frame_size < min_frame_size))
4296                         goto out;
4297                 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4298                         goto out;
4299
4300                 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4301                 if (unlikely(rb->frames_per_block == 0))
4302                         goto out;
4303                 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4304                         goto out;
4305                 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4306                                         req->tp_frame_nr))
4307                         goto out;
4308
4309                 err = -ENOMEM;
4310                 order = get_order(req->tp_block_size);
4311                 pg_vec = alloc_pg_vec(req, order);
4312                 if (unlikely(!pg_vec))
4313                         goto out;
4314                 switch (po->tp_version) {
4315                 case TPACKET_V3:
4316                         /* Block transmit is not supported yet */
4317                         if (!tx_ring) {
4318                                 init_prb_bdqc(po, rb, pg_vec, req_u);
4319                         } else {
4320                                 struct tpacket_req3 *req3 = &req_u->req3;
4321
4322                                 if (req3->tp_retire_blk_tov ||
4323                                     req3->tp_sizeof_priv ||
4324                                     req3->tp_feature_req_word) {
4325                                         err = -EINVAL;
4326                                         goto out;
4327                                 }
4328                         }
4329                         break;
4330                 default:
4331                         break;
4332                 }
4333         }
4334         /* Done */
4335         else {
4336                 err = -EINVAL;
4337                 if (unlikely(req->tp_frame_nr))
4338                         goto out;
4339         }
4340
4341
4342         /* Detach socket from network */
4343         spin_lock(&po->bind_lock);
4344         was_running = po->running;
4345         num = po->num;
4346         if (was_running) {
4347                 po->num = 0;
4348                 __unregister_prot_hook(sk, false);
4349         }
4350         spin_unlock(&po->bind_lock);
4351
4352         synchronize_net();
4353
4354         err = -EBUSY;
4355         mutex_lock(&po->pg_vec_lock);
4356         if (closing || atomic_read(&po->mapped) == 0) {
4357                 err = 0;
4358                 spin_lock_bh(&rb_queue->lock);
4359                 swap(rb->pg_vec, pg_vec);
4360                 rb->frame_max = (req->tp_frame_nr - 1);
4361                 rb->head = 0;
4362                 rb->frame_size = req->tp_frame_size;
4363                 spin_unlock_bh(&rb_queue->lock);
4364
4365                 swap(rb->pg_vec_order, order);
4366                 swap(rb->pg_vec_len, req->tp_block_nr);
4367
4368                 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4369                 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4370                                                 tpacket_rcv : packet_rcv;
4371                 skb_queue_purge(rb_queue);
4372                 if (atomic_read(&po->mapped))
4373                         pr_err("packet_mmap: vma is busy: %d\n",
4374                                atomic_read(&po->mapped));
4375         }
4376         mutex_unlock(&po->pg_vec_lock);
4377
4378         spin_lock(&po->bind_lock);
4379         if (was_running) {
4380                 po->num = num;
4381                 register_prot_hook(sk);
4382         }
4383         spin_unlock(&po->bind_lock);
4384         if (pg_vec && (po->tp_version > TPACKET_V2)) {
4385                 /* Because we don't support block-based V3 on tx-ring */
4386                 if (!tx_ring)
4387                         prb_shutdown_retire_blk_timer(po, rb_queue);
4388         }
4389
4390         if (pg_vec)
4391                 free_pg_vec(pg_vec, order, req->tp_block_nr);
4392 out:
4393         return err;
4394 }
4395
4396 static int packet_mmap(struct file *file, struct socket *sock,
4397                 struct vm_area_struct *vma)
4398 {
4399         struct sock *sk = sock->sk;
4400         struct packet_sock *po = pkt_sk(sk);
4401         unsigned long size, expected_size;
4402         struct packet_ring_buffer *rb;
4403         unsigned long start;
4404         int err = -EINVAL;
4405         int i;
4406
4407         if (vma->vm_pgoff)
4408                 return -EINVAL;
4409
4410         mutex_lock(&po->pg_vec_lock);
4411
4412         expected_size = 0;
4413         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4414                 if (rb->pg_vec) {
4415                         expected_size += rb->pg_vec_len
4416                                                 * rb->pg_vec_pages
4417                                                 * PAGE_SIZE;
4418                 }
4419         }
4420
4421         if (expected_size == 0)
4422                 goto out;
4423
4424         size = vma->vm_end - vma->vm_start;
4425         if (size != expected_size)
4426                 goto out;
4427
4428         start = vma->vm_start;
4429         for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4430                 if (rb->pg_vec == NULL)
4431                         continue;
4432
4433                 for (i = 0; i < rb->pg_vec_len; i++) {
4434                         struct page *page;
4435                         void *kaddr = rb->pg_vec[i].buffer;
4436                         int pg_num;
4437
4438                         for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4439                                 page = pgv_to_page(kaddr);
4440                                 err = vm_insert_page(vma, start, page);
4441                                 if (unlikely(err))
4442                                         goto out;
4443                                 start += PAGE_SIZE;
4444                                 kaddr += PAGE_SIZE;
4445                         }
4446                 }
4447         }
4448
4449         atomic_inc(&po->mapped);
4450         vma->vm_ops = &packet_mmap_ops;
4451         err = 0;
4452
4453 out:
4454         mutex_unlock(&po->pg_vec_lock);
4455         return err;
4456 }
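
/*
 *	Usage sketch (userspace): per the checks above, the mapping must use
 *	offset 0 and cover the configured RX and TX rings exactly, RX first.
 *	For a single RX ring set up as in the PACKET_RX_RING sketch earlier:
 *
 *		size_t maplen = (size_t)req.tp_block_size * req.tp_block_nr;
 *		void *ring = mmap(NULL, maplen, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, 0);
 *		if (ring == MAP_FAILED)
 *			perror("mmap");
 */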
4457
4458 static const struct proto_ops packet_ops_spkt = {
4459         .family =       PF_PACKET,
4460         .owner =        THIS_MODULE,
4461         .release =      packet_release,
4462         .bind =         packet_bind_spkt,
4463         .connect =      sock_no_connect,
4464         .socketpair =   sock_no_socketpair,
4465         .accept =       sock_no_accept,
4466         .getname =      packet_getname_spkt,
4467         .poll =         datagram_poll,
4468         .ioctl =        packet_ioctl,
4469         .listen =       sock_no_listen,
4470         .shutdown =     sock_no_shutdown,
4471         .setsockopt =   sock_no_setsockopt,
4472         .getsockopt =   sock_no_getsockopt,
4473         .sendmsg =      packet_sendmsg_spkt,
4474         .recvmsg =      packet_recvmsg,
4475         .mmap =         sock_no_mmap,
4476         .sendpage =     sock_no_sendpage,
4477 };
4478
4479 static const struct proto_ops packet_ops = {
4480         .family =       PF_PACKET,
4481         .owner =        THIS_MODULE,
4482         .release =      packet_release,
4483         .bind =         packet_bind,
4484         .connect =      sock_no_connect,
4485         .socketpair =   sock_no_socketpair,
4486         .accept =       sock_no_accept,
4487         .getname =      packet_getname,
4488         .poll =         packet_poll,
4489         .ioctl =        packet_ioctl,
4490         .listen =       sock_no_listen,
4491         .shutdown =     sock_no_shutdown,
4492         .setsockopt =   packet_setsockopt,
4493         .getsockopt =   packet_getsockopt,
4494 #ifdef CONFIG_COMPAT
4495         .compat_setsockopt = compat_packet_setsockopt,
4496 #endif
4497         .sendmsg =      packet_sendmsg,
4498         .recvmsg =      packet_recvmsg,
4499         .mmap =         packet_mmap,
4500         .sendpage =     sock_no_sendpage,
4501 };
4502
4503 static const struct net_proto_family packet_family_ops = {
4504         .family =       PF_PACKET,
4505         .create =       packet_create,
4506         .owner  =       THIS_MODULE,
4507 };
4508
4509 static struct notifier_block packet_netdev_notifier = {
4510         .notifier_call =        packet_notifier,
4511 };
4512
4513 #ifdef CONFIG_PROC_FS
4514
4515 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4516         __acquires(RCU)
4517 {
4518         struct net *net = seq_file_net(seq);
4519
4520         rcu_read_lock();
4521         return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4522 }
4523
4524 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4525 {
4526         struct net *net = seq_file_net(seq);
4527         return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4528 }
4529
4530 static void packet_seq_stop(struct seq_file *seq, void *v)
4531         __releases(RCU)
4532 {
4533         rcu_read_unlock();
4534 }
4535
4536 static int packet_seq_show(struct seq_file *seq, void *v)
4537 {
4538         if (v == SEQ_START_TOKEN)
4539                 seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4540         else {
4541                 struct sock *s = sk_entry(v);
4542                 const struct packet_sock *po = pkt_sk(s);
4543
4544                 seq_printf(seq,
4545                            "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4546                            s,
4547                            refcount_read(&s->sk_refcnt),
4548                            s->sk_type,
4549                            ntohs(po->num),
4550                            po->ifindex,
4551                            po->running,
4552                            atomic_read(&s->sk_rmem_alloc),
4553                            from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4554                            sock_i_ino(s));
4555         }
4556
4557         return 0;
4558 }
4559
4560 static const struct seq_operations packet_seq_ops = {
4561         .start  = packet_seq_start,
4562         .next   = packet_seq_next,
4563         .stop   = packet_seq_stop,
4564         .show   = packet_seq_show,
4565 };
4566 #endif
4567
4568 static int __net_init packet_net_init(struct net *net)
4569 {
4570         mutex_init(&net->packet.sklist_lock);
4571         INIT_HLIST_HEAD(&net->packet.sklist);
4572
4573         if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4574                         sizeof(struct seq_net_private)))
4575                 return -ENOMEM;
4576
4577         return 0;
4578 }
4579
4580 static void __net_exit packet_net_exit(struct net *net)
4581 {
4582         remove_proc_entry("packet", net->proc_net);
4583         WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4584 }
4585
4586 static struct pernet_operations packet_net_ops = {
4587         .init = packet_net_init,
4588         .exit = packet_net_exit,
4589 };
4590
4591
4592 static void __exit packet_exit(void)
4593 {
4594         unregister_netdevice_notifier(&packet_netdev_notifier);
4595         unregister_pernet_subsys(&packet_net_ops);
4596         sock_unregister(PF_PACKET);
4597         proto_unregister(&packet_proto);
4598 }
4599
4600 static int __init packet_init(void)
4601 {
4602         int rc = proto_register(&packet_proto, 0);
4603
4604         if (rc != 0)
4605                 goto out;
4606
4607         sock_register(&packet_family_ops);
4608         register_pernet_subsys(&packet_net_ops);
4609         register_netdevice_notifier(&packet_netdev_notifier);
4610 out:
4611         return rc;
4612 }
4613
4614 module_init(packet_init);
4615 module_exit(packet_exit);
4616 MODULE_LICENSE("GPL");
4617 MODULE_ALIAS_NETPROTO(PF_PACKET);