// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski	:	Module initialization cleanup.
 *	Ulises Alonso		:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->header_ops != NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->header_ops != NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->header_ops == NULL
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev->header_ops == NULL
   mac_header -> data. ll header is invisible to us.
   data       -> data

Resume
  If dev->header_ops == NULL we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev->header_ops != NULL
   mac_header -> ll header
   data       -> ll header

dev->header_ops == NULL (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position,
   packet classifier depends on it.
 */
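/* Illustrative sketch (added here for clarity, not part of the original
 * source): how the two packet-socket types expose the LL header to user
 * space. A SOCK_RAW socket yields frames starting at the link-layer header,
 * while SOCK_DGRAM yields them starting at the network header. Error
 * handling is omitted:
 *
 *	int raw   = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	char buf[2048];
 *	recv(raw,   buf, sizeof(buf), 0);   buf[0] begins the Ethernet header
 *	recv(dgram, buf, sizeof(buf), 0);   buf[0] begins the IP header
 */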
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};
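/* Illustrative sketch (added here for clarity, not part of the original
 * source): the user-space mirror of packet_mreq_max is struct packet_mreq;
 * a request that fits in the shorter structure, e.g. enabling promiscuous
 * mode on ifindex 2, could look like this (error handling omitted):
 *
 *	struct packet_mreq mreq = { .mr_ifindex = 2,
 *				    .mr_type = PACKET_MR_PROMISC };
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */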
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket is not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * callers responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
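/* Illustrative sketch (added here for clarity, not part of the original
 * source): user space selects which timestamp tpacket_get_timestamp() may
 * report by setting PACKET_TIMESTAMP, e.g. requesting raw hardware stamps
 * (error handling omitted):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * Ring frames then carry TP_STATUS_TS_RAW_HARDWARE or, via the fallback
 * taken above, TP_STATUS_TS_SOFTWARE in tp_status.
 */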
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is so slow you don't really
	 * need to worry about perf anyways
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
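/* Worked example (added for illustration, not part of the original source):
 * with tp_block_size = 1 MiB on a 1 Gbit/s link, mbits = (1048576 * 8) /
 * (1024 * 1024) = 8 and div = 1, so the function returns 8 + 1 = 9 ms --
 * roughly the time needed to fill one block at line rate, matching the
 * "~8 ms" estimate in the timer-logic comment further down.
 */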
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, lets say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 *
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 *
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
			       /* Case 2. queue was frozen,user-space caught up,
				* now the link went idle && the timer fired.
				* We don't have a block to close.So we open this
				* block and restart the timer.
				* opening a block thaws the queue,restarts timer
				* Thawing/timer-refresh is a side effect.
				*/
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header(we know header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect of prb_close_block() is to:
 *
 * 1) Mark the block as closed.
 * 2) Increment active_blk_num
 *
 * Note:We DONT refresh the timer on purpose.
 *	Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7,loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires,it will refresh itself so that we can
 *         re-open block-0 in near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available.user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}
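/* Worked example (added for illustration, not part of the original source):
 * with ROOM_POW_OFF = 2, the probes above look len >> 2 (a quarter of the
 * ring) ahead of the current position. For a V1/V2 ring of 256 frames at
 * head 10, room is ROOM_NORMAL only if frame (10 + 64) % 256 = 74 is still
 * owned by the kernel; a pow_off of 0 merely checks the very next slot,
 * which maps to ROOM_LOW.
 */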
static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int pressure, ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (READ_ONCE(po->pressure) != pressure)
		WRITE_ONCE(po->pressure, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (READ_ONCE(po->pressure) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = prandom_u32() % ROLLOVER_HLEN;

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
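/* Illustrative sketch (added here for clarity, not part of the original
 * source): joining a fanout group from user space. Sockets that set the
 * same group id in the same netns have packet_rcv_fanout() spread load
 * between them (error handling omitted; id 42 and the hash policy are
 * arbitrary choices):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */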
DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
		break;
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}

	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		break;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	skb_probe_transport_header(skb);
}

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}
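/* Illustrative sketch (added here for clarity, not part of the original
 * source): the filter consulted by run_filter() can be attached from user
 * space as a classic BPF program, e.g. a one-instruction program accepting
 * every packet up to 64KB (error handling omitted):
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },   (BPF_RET|BPF_K: accept 0xffff)
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */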
static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len)
{
	struct virtio_net_hdr vnet_hdr;

	if (*len < sizeof(vnet_hdr))
		return -EINVAL;
	*len -= sizeof(vnet_hdr);

	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
		return -EINVAL;

	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
}

/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequencially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	bool is_drop_n_account = false;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	is_drop_n_account = true;
	atomic_inc(&po->tp_drops);
	atomic_inc(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, hdrlen;
	unsigned int netoff;
	struct sk_buff *copy_skb = NULL;
	struct timespec64 ts;
	__u32 ts_status;
	bool is_drop_n_account = false;
	unsigned int slot_id = 0;
	bool do_vnet = false;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until current aligned size without forcing
	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	/* If we are flooded, just give up */
	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 (skb->ip_summed == CHECKSUM_COMPLETE ||
		  skb_csum_unnecessary(skb)))
		status |= TP_STATUS_CSUM_VALID;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
				       po->tp_reserve;
		if (po->has_vnet_hdr) {
			netoff += sizeof(struct virtio_net_hdr);
			do_vnet = true;
		}
		macoff = netoff - maclen;
	}
	if (netoff > USHRT_MAX) {
		atomic_inc(&po->tp_drops);
		goto drop_n_restore;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0) {
				snaplen = 0;
				do_vnet = false;
			}
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		u32 nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
			do_vnet = false;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto drop_n_account;

	if (po->tp_version <= TPACKET_V2) {
		slot_id = po->rx_ring.head;
		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
			goto drop_n_account;
		__set_bit(slot_id, po->rx_ring.rx_owner_map);
	}

	if (do_vnet &&
	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
				    sizeof(struct virtio_net_hdr),
				    vio_le(), true, 0)) {
		if (po->tp_version == TPACKET_V3)
			prb_clear_blk_fill_status(&po->rx_ring);
		goto drop_n_account;
	}

	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
	/*
	 * LOSING will be reported till you read the stats,
	 * because it's COR - Clear On Read.
	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
	 * at packet level.
	 */
		if (atomic_read(&po->tp_drops))
			status |= TP_STATUS_LOSING;
	}

	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		ktime_get_real_ts64(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset,vlan are already populated above.
		 * So DONT clear those fields here
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		spin_lock(&sk->sk_receive_queue.lock);
		__packet_set_status(po, h.raw, status);
		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
		spin_unlock(&sk->sk_receive_queue.lock);
		sk->sk_data_ready(sk);
	} else if (po->tp_version == TPACKET_V3) {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	if (!is_drop_n_account)
		consume_skb(skb);
	else
		kfree_skb(skb);
	return 0;

drop_n_account:
	spin_unlock(&sk->sk_receive_queue.lock);
	atomic_inc(&po->tp_drops);
	is_drop_n_account = true;

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
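/* Illustrative sketch (added here for clarity, not part of the original
 * source): the TPACKET_V3 receive path above fills blocks that user space
 * maps and walks roughly like this (error handling and the block-walking
 * loop omitted; the sizes are arbitrary):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size = 1 << 20,
 *		.tp_block_nr = 8,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr = (1 << 20) / 2048 * 8,
 *		.tp_retire_blk_tov = 60,
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Each block is handed back to the kernel by writing TP_STATUS_KERNEL into
 * its tpacket_block_desc once user space is done with it.
 */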
2435 static void tpacket_destruct_skb(struct sk_buff *skb)
2437 struct packet_sock *po = pkt_sk(skb->sk);
2439 if (likely(po->tx_ring.pg_vec)) {
2443 ph = skb_zcopy_get_nouarg(skb);
2444 packet_dec_pending(&po->tx_ring);
2446 ts = __packet_set_timestamp(po, ph, skb);
2447 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2449 if (!packet_read_pending(&po->tx_ring))
2450 complete(&po->skb_completion);
2456 static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2458 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2459 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2460 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2461 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2462 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2463 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2464 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2466 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2472 static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2473 struct virtio_net_hdr *vnet_hdr)
2475 if (*len < sizeof(*vnet_hdr))
2476 return -EINVAL;
2477 *len -= sizeof(*vnet_hdr);
2479 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2480 return -EFAULT;
2482 return __packet_snd_vnet_parse(vnet_hdr, *len);
2483 }
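/*
 * Example: what a PACKET_VNET_HDR sender looks like from userspace. A
 * minimal sketch (editor's illustration; send_with_vnet_hdr() is a
 * made-up helper, and it assumes PACKET_VNET_HDR was already enabled
 * with setsockopt()), kept out of the build by the #if 0 guard.
 */
#if 0
#include <linux/if_packet.h>
#include <linux/virtio_net.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_with_vnet_hdr(int fd, const void *frame, size_t len)
{
	struct virtio_net_hdr vh;
	struct iovec iov[2];
	struct msghdr msg;

	memset(&vh, 0, sizeof(vh));	/* no csum offload, no GSO */
	iov[0].iov_base = &vh;
	iov[0].iov_len = sizeof(vh);
	iov[1].iov_base = (void *)frame;
	iov[1].iov_len = len;
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = iov;
	msg.msg_iovlen = 2;
	/* packet_snd_vnet_parse() consumes the header before the frame */
	return sendmsg(fd, &msg, 0);
}
#endif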
2485 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2486 void *frame, struct net_device *dev, void *data, int tp_len,
2487 __be16 proto, unsigned char *addr, int hlen, int copylen,
2488 const struct sockcm_cookie *sockc)
2490 union tpacket_uhdr ph;
2491 int to_write, offset, len, nr_frags, len_max;
2492 struct socket *sock = po->sk.sk_socket;
2498 skb->protocol = proto;
2500 skb->priority = po->sk.sk_priority;
2501 skb->mark = po->sk.sk_mark;
2502 skb->tstamp = sockc->transmit_time;
2503 skb_setup_tx_timestamp(skb, sockc->tsflags);
2504 skb_zcopy_set_nouarg(skb, ph.raw);
2506 skb_reserve(skb, hlen);
2507 skb_reset_network_header(skb);
2509 to_write = tp_len;
2511 if (sock->type == SOCK_DGRAM) {
2512 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2513 NULL, tp_len);
2514 if (unlikely(err < 0))
2515 return -EINVAL;
2516 } else if (copylen) {
2517 int hdrlen = min_t(int, copylen, tp_len);
2519 skb_push(skb, dev->hard_header_len);
2520 skb_put(skb, copylen - dev->hard_header_len);
2521 err = skb_store_bits(skb, 0, data, hdrlen);
2522 if (unlikely(err))
2523 return err;
2524 if (!dev_validate_header(dev, skb->data, hdrlen))
2525 return -EINVAL;
2527 data += hdrlen;
2528 to_write -= hdrlen;
2529 }
2531 offset = offset_in_page(data);
2532 len_max = PAGE_SIZE - offset;
2533 len = ((to_write > len_max) ? len_max : to_write);
2535 skb->data_len = to_write;
2536 skb->len += to_write;
2537 skb->truesize += to_write;
2538 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2540 while (likely(to_write)) {
2541 nr_frags = skb_shinfo(skb)->nr_frags;
2543 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2544 pr_err("Packet exceeds the number of skb frags (%lu)\n",
2545 MAX_SKB_FRAGS);
2546 return -EFAULT;
2547 }
2549 page = pgv_to_page(data);
2551 flush_dcache_page(page);
2553 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2556 len_max = PAGE_SIZE;
2557 len = ((to_write > len_max) ? len_max : to_write);
2560 packet_parse_headers(skb, sock);
2565 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2566 int size_max, void **data)
2568 union tpacket_uhdr ph;
2573 switch (po->tp_version) {
2574 case TPACKET_V3:
2575 if (ph.h3->tp_next_offset != 0) {
2576 pr_warn_once("variable sized slot not supported");
2577 return -EINVAL;
2578 }
2579 tp_len = ph.h3->tp_len;
2580 break;
2581 case TPACKET_V2:
2582 tp_len = ph.h2->tp_len;
2583 break;
2584 default:
2585 tp_len = ph.h1->tp_len;
2586 break;
2587 }
2588 if (unlikely(tp_len > size_max)) {
2589 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2590 return -EMSGSIZE;
2591 }
2593 if (unlikely(po->tp_tx_has_off)) {
2594 int off_min, off_max;
2596 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2597 off_max = po->tx_ring.frame_size - tp_len;
2598 if (po->sk.sk_type == SOCK_DGRAM) {
2599 switch (po->tp_version) {
2600 case TPACKET_V3:
2601 off = ph.h3->tp_net;
2602 break;
2603 case TPACKET_V2:
2604 off = ph.h2->tp_net;
2605 break;
2606 default:
2607 off = ph.h1->tp_net;
2608 break;
2609 }
2610 } else {
2611 switch (po->tp_version) {
2612 case TPACKET_V3:
2613 off = ph.h3->tp_mac;
2614 break;
2615 case TPACKET_V2:
2616 off = ph.h2->tp_mac;
2617 break;
2618 default:
2619 off = ph.h1->tp_mac;
2620 break;
2621 }
2622 }
2623 if (unlikely((off < off_min) || (off_max < off)))
2624 return -EINVAL;
2625 } else {
2626 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2627 }
2629 *data = frame + off;
2631 return tp_len;
2632 }
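/*
 * Example: the frame layout tpacket_parse_header() expects. A minimal
 * TPACKET_V2 userspace sketch (editor's illustration; fill_v2_tx_frame()
 * is a made-up helper) for the default case without PACKET_TX_HAS_OFF,
 * where data starts at tp_hdrlen - sizeof(struct sockaddr_ll).
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>

static void fill_v2_tx_frame(void *frame, const void *pkt, unsigned int len)
{
	struct tpacket2_hdr *hdr = frame;
	unsigned int off = TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);

	memcpy((char *)frame + off, pkt, len);
	hdr->tp_len = len;
	__sync_synchronize();	/* publish the data before the status */
	hdr->tp_status = TP_STATUS_SEND_REQUEST;
}
#endif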
2633 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2635 struct sk_buff *skb = NULL;
2636 struct net_device *dev;
2637 struct virtio_net_hdr *vnet_hdr = NULL;
2638 struct sockcm_cookie sockc;
2640 int err, reserve = 0;
2642 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2643 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2644 unsigned char *addr = NULL;
2645 int tp_len, size_max;
2648 int status = TP_STATUS_AVAILABLE;
2649 int hlen, tlen, copylen = 0;
2652 mutex_lock(&po->pg_vec_lock);
2654 /* packet_sendmsg()'s check on tx_ring.pg_vec was lockless,
2655 * so we must re-confirm it here under the protection of pg_vec_lock.
2656 */
2657 if (unlikely(!po->tx_ring.pg_vec)) {
2661 if (likely(saddr == NULL)) {
2662 dev = packet_cached_dev_get(po);
2666 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2668 if (msg->msg_namelen < (saddr->sll_halen
2669 + offsetof(struct sockaddr_ll,
2672 proto = saddr->sll_protocol;
2673 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2674 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2675 if (dev && msg->msg_namelen < dev->addr_len +
2676 offsetof(struct sockaddr_ll, sll_addr))
2678 addr = saddr->sll_addr;
2683 if (unlikely(dev == NULL))
2686 if (unlikely(!(dev->flags & IFF_UP)))
2689 sockcm_init(&sockc, &po->sk);
2690 if (msg->msg_controllen) {
2691 err = sock_cmsg_send(&po->sk, msg, &sockc);
2696 if (po->sk.sk_socket->type == SOCK_RAW)
2697 reserve = dev->hard_header_len;
2698 size_max = po->tx_ring.frame_size
2699 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2701 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2702 size_max = dev->mtu + reserve + VLAN_HLEN;
2704 reinit_completion(&po->skb_completion);
2707 ph = packet_current_frame(po, &po->tx_ring,
2708 TP_STATUS_SEND_REQUEST);
2709 if (unlikely(ph == NULL)) {
2710 if (need_wait && skb) {
2711 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2712 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2714 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2718 /* check for additional frames */
2723 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2727 status = TP_STATUS_SEND_REQUEST;
2728 hlen = LL_RESERVED_SPACE(dev);
2729 tlen = dev->needed_tailroom;
2730 if (po->has_vnet_hdr) {
2732 data += sizeof(*vnet_hdr);
2733 tp_len -= sizeof(*vnet_hdr);
2735 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2739 copylen = __virtio16_to_cpu(vio_le(),
2742 copylen = max_t(int, copylen, dev->hard_header_len);
2743 skb = sock_alloc_send_skb(&po->sk,
2744 hlen + tlen + sizeof(struct sockaddr_ll) +
2745 (copylen - dev->hard_header_len),
2748 if (unlikely(skb == NULL)) {
2749 /* we assume the socket was initially writeable ... */
2750 if (likely(len_sum > 0))
2754 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2755 addr, hlen, copylen, &sockc);
2756 if (likely(tp_len >= 0) &&
2757 tp_len > dev->mtu + reserve &&
2758 !po->has_vnet_hdr &&
2759 !packet_extra_vlan_len_allowed(dev, skb))
2762 if (unlikely(tp_len < 0)) {
2763 tpacket_error:
2764 if (po->tp_loss) {
2765 __packet_set_status(po, ph,
2766 TP_STATUS_AVAILABLE);
2767 packet_increment_head(&po->tx_ring);
2768 kfree_skb(skb);
2769 continue;
2770 } else {
2771 status = TP_STATUS_WRONG_FORMAT;
2772 err = tp_len;
2773 goto out_status;
2774 }
2775 }
2777 if (po->has_vnet_hdr) {
2778 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2779 tp_len = -EINVAL;
2780 goto tpacket_error;
2781 }
2782 virtio_net_hdr_set_proto(skb, vnet_hdr);
2783 }
2785 skb->destructor = tpacket_destruct_skb;
2786 __packet_set_status(po, ph, TP_STATUS_SENDING);
2787 packet_inc_pending(&po->tx_ring);
2789 status = TP_STATUS_SEND_REQUEST;
2790 err = po->xmit(skb);
2791 if (unlikely(err > 0)) {
2792 err = net_xmit_errno(err);
2793 if (err && __packet_get_status(po, ph) ==
2794 TP_STATUS_AVAILABLE) {
2795 /* skb was destructed already */
2796 skb = NULL;
2797 goto out_status;
2798 }
2799 /*
2800 * skb was dropped but not destructed yet;
2801 * let's treat it like congestion or err < 0
2802 */
2803 err = 0;
2804 }
2805 packet_increment_head(&po->tx_ring);
2806 len_sum += tp_len;
2807 } while (likely((ph != NULL) ||
2808 /* Note: packet_read_pending() might be slow if we
2809 * have to call it, since it sums a per-CPU counter,
2810 * but on the fast path the first condition already
2811 * short-circuits the loop, so we luckily don't have
2812 * to go down that path anyway.
2813 */
2814 (need_wait && packet_read_pending(&po->tx_ring))));
2816 err = len_sum;
2817 goto out_put;
2819 out_status:
2820 __packet_set_status(po, ph, status);
2821 kfree_skb(skb);
2822 out_put:
2823 dev_put(dev);
2824 out:
2825 mutex_unlock(&po->pg_vec_lock);
2826 return err;
2827 }
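/*
 * Example: the userspace half of the tpacket_snd() loop above. A
 * minimal sketch (editor's illustration; setup_tx_ring() is a made-up
 * helper): configure the ring, mmap() it, mark frames
 * TP_STATUS_SEND_REQUEST, then kick the kernel with an empty send().
 */
#if 0
#include <linux/if_packet.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/socket.h>

static void *setup_tx_ring(int fd, const struct tpacket_req *req)
{
	if (setsockopt(fd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req)))
		return NULL;
	return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

/* After filling frames (see fill_v2_tx_frame() above):
 *	send(fd, NULL, 0, 0);		   blocking kick
 *	send(fd, NULL, 0, MSG_DONTWAIT);   non-blocking kick
 */
#endif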
2829 static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2830 size_t reserve, size_t len,
2831 size_t linear, int noblock,
2834 struct sk_buff *skb;
2836 /* Under a page? Don't bother with paged skb. */
2837 if (prepad + len < PAGE_SIZE || !linear)
2840 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2845 skb_reserve(skb, reserve);
2846 skb_put(skb, linear);
2847 skb->data_len = len - linear;
2848 skb->len += len - linear;
2853 static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2855 struct sock *sk = sock->sk;
2856 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2857 struct sk_buff *skb;
2858 struct net_device *dev;
2860 unsigned char *addr = NULL;
2861 int err, reserve = 0;
2862 struct sockcm_cookie sockc;
2863 struct virtio_net_hdr vnet_hdr = { 0 };
2865 struct packet_sock *po = pkt_sk(sk);
2866 bool has_vnet_hdr = false;
2867 int hlen, tlen, linear;
2871 * Get and verify the address.
2874 if (likely(saddr == NULL)) {
2875 dev = packet_cached_dev_get(po);
2879 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2881 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2883 proto = saddr->sll_protocol;
2884 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2885 if (sock->type == SOCK_DGRAM) {
2886 if (dev && msg->msg_namelen < dev->addr_len +
2887 offsetof(struct sockaddr_ll, sll_addr))
2889 addr = saddr->sll_addr;
2894 if (unlikely(dev == NULL))
2897 if (unlikely(!(dev->flags & IFF_UP)))
2900 sockcm_init(&sockc, sk);
2901 sockc.mark = sk->sk_mark;
2902 if (msg->msg_controllen) {
2903 err = sock_cmsg_send(sk, msg, &sockc);
2908 if (sock->type == SOCK_RAW)
2909 reserve = dev->hard_header_len;
2910 if (po->has_vnet_hdr) {
2911 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2914 has_vnet_hdr = true;
2917 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2918 if (!netif_supports_nofcs(dev)) {
2919 err = -EPROTONOSUPPORT;
2922 extra_len = 4; /* We're doing our own CRC */
2926 if (!vnet_hdr.gso_type &&
2927 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2931 hlen = LL_RESERVED_SPACE(dev);
2932 tlen = dev->needed_tailroom;
2933 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2934 linear = max(linear, min_t(int, len, dev->hard_header_len));
2935 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2936 msg->msg_flags & MSG_DONTWAIT, &err);
2940 skb_reset_network_header(skb);
2943 if (sock->type == SOCK_DGRAM) {
2944 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2945 if (unlikely(offset < 0))
2947 } else if (reserve) {
2948 skb_reserve(skb, -reserve);
2949 if (len < reserve + sizeof(struct ipv6hdr) &&
2950 dev->min_header_len != dev->hard_header_len)
2951 skb_reset_network_header(skb);
2954 /* Returns -EFAULT on error */
2955 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2959 if (sock->type == SOCK_RAW &&
2960 !dev_validate_header(dev, skb->data, len)) {
2965 skb_setup_tx_timestamp(skb, sockc.tsflags);
2967 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2968 !packet_extra_vlan_len_allowed(dev, skb)) {
2973 skb->protocol = proto;
2975 skb->priority = sk->sk_priority;
2976 skb->mark = sockc.mark;
2977 skb->tstamp = sockc.transmit_time;
2980 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2983 len += sizeof(vnet_hdr);
2984 virtio_net_hdr_set_proto(skb, &vnet_hdr);
2987 packet_parse_headers(skb, sock);
2989 if (unlikely(extra_len == 4))
2992 err = po->xmit(skb);
2993 if (err > 0 && (err = net_xmit_errno(err)) != 0)
3009 static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3011 struct sock *sk = sock->sk;
3012 struct packet_sock *po = pkt_sk(sk);
3014 if (po->tx_ring.pg_vec)
3015 return tpacket_snd(po, msg);
3017 return packet_snd(sock, msg, len);
3021 * Close a PACKET socket. This is fairly simple. We immediately go
3022 * to 'closed' state and remove our protocol entry in the device list.
3025 static int packet_release(struct socket *sock)
3027 struct sock *sk = sock->sk;
3028 struct packet_sock *po;
3029 struct packet_fanout *f;
3031 union tpacket_req_u req_u;
3039 mutex_lock(&net->packet.sklist_lock);
3040 sk_del_node_init_rcu(sk);
3041 mutex_unlock(&net->packet.sklist_lock);
3044 sock_prot_inuse_add(net, sk->sk_prot, -1);
3047 spin_lock(&po->bind_lock);
3048 unregister_prot_hook(sk, false);
3049 packet_cached_dev_reset(po);
3051 if (po->prot_hook.dev) {
3052 dev_put(po->prot_hook.dev);
3053 po->prot_hook.dev = NULL;
3055 spin_unlock(&po->bind_lock);
3057 packet_flush_mclist(sk);
3060 if (po->rx_ring.pg_vec) {
3061 memset(&req_u, 0, sizeof(req_u));
3062 packet_set_ring(sk, &req_u, 1, 0);
3065 if (po->tx_ring.pg_vec) {
3066 memset(&req_u, 0, sizeof(req_u));
3067 packet_set_ring(sk, &req_u, 1, 1);
3071 f = fanout_release(sk);
3075 kfree(po->rollover);
3077 fanout_release_data(f);
3081 * Now the socket is dead. No more input will appear.
3088 skb_queue_purge(&sk->sk_receive_queue);
3089 packet_free_pending(po);
3090 sk_refcnt_debug_release(sk);
3097 * Attach a packet hook.
3100 static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3103 struct packet_sock *po = pkt_sk(sk);
3104 struct net_device *dev_curr;
3107 struct net_device *dev = NULL;
3109 bool unlisted = false;
3112 spin_lock(&po->bind_lock);
3121 dev = dev_get_by_name_rcu(sock_net(sk), name);
3126 } else if (ifindex) {
3127 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3137 proto_curr = po->prot_hook.type;
3138 dev_curr = po->prot_hook.dev;
3140 need_rehook = proto_curr != proto || dev_curr != dev;
3145 /* prevents packet_notifier() from calling
3146 * register_prot_hook()
3149 __unregister_prot_hook(sk, true);
3151 dev_curr = po->prot_hook.dev;
3153 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3157 BUG_ON(po->running);
3159 po->prot_hook.type = proto;
3161 if (unlikely(unlisted)) {
3163 po->prot_hook.dev = NULL;
3165 packet_cached_dev_reset(po);
3167 po->prot_hook.dev = dev;
3168 po->ifindex = dev ? dev->ifindex : 0;
3169 packet_cached_dev_assign(po, dev);
3175 if (proto == 0 || !need_rehook)
3178 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3179 register_prot_hook(sk);
3181 sk->sk_err = ENETDOWN;
3182 if (!sock_flag(sk, SOCK_DEAD))
3183 sk->sk_error_report(sk);
3188 spin_unlock(&po->bind_lock);
3194 * Bind a packet socket to a device
3197 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3200 struct sock *sk = sock->sk;
3201 char name[sizeof(uaddr->sa_data) + 1];
3207 if (addr_len != sizeof(struct sockaddr))
3209 /* uaddr->sa_data comes from userspace; it's not guaranteed to be
3210 * zero-terminated.
3211 */
3212 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3213 name[sizeof(uaddr->sa_data)] = 0;
3215 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3218 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3220 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3221 struct sock *sk = sock->sk;
3227 if (addr_len < sizeof(struct sockaddr_ll))
3229 if (sll->sll_family != AF_PACKET)
3232 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3233 sll->sll_protocol ? : pkt_sk(sk)->num);
3234 }
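/*
 * Example: the sockaddr_ll bind that packet_do_bind() serves. A minimal
 * userspace sketch (editor's illustration; bind_to_dev() is a made-up
 * helper): only sll_family, sll_protocol and sll_ifindex matter here.
 */
#if 0
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int bind_to_dev(int fd, const char *ifname)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);	/* 0 would keep po->num */
	sll.sll_ifindex = if_nametoindex(ifname);
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif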
3236 static struct proto packet_proto = {
3238 .owner = THIS_MODULE,
3239 .obj_size = sizeof(struct packet_sock),
3243 * Create a packet of type SOCK_PACKET.
3246 static int packet_create(struct net *net, struct socket *sock, int protocol,
3250 struct packet_sock *po;
3251 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3254 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3256 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3257 sock->type != SOCK_PACKET)
3258 return -ESOCKTNOSUPPORT;
3260 sock->state = SS_UNCONNECTED;
3263 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3267 sock->ops = &packet_ops;
3268 if (sock->type == SOCK_PACKET)
3269 sock->ops = &packet_ops_spkt;
3271 sock_init_data(sock, sk);
3274 init_completion(&po->skb_completion);
3275 sk->sk_family = PF_PACKET;
3277 po->xmit = dev_queue_xmit;
3279 err = packet_alloc_pending(po);
3283 packet_cached_dev_reset(po);
3285 sk->sk_destruct = packet_sock_destruct;
3286 sk_refcnt_debug_inc(sk);
3289 * Attach a protocol block
3292 spin_lock_init(&po->bind_lock);
3293 mutex_init(&po->pg_vec_lock);
3294 po->rollover = NULL;
3295 po->prot_hook.func = packet_rcv;
3297 if (sock->type == SOCK_PACKET)
3298 po->prot_hook.func = packet_rcv_spkt;
3300 po->prot_hook.af_packet_priv = sk;
3303 po->prot_hook.type = proto;
3304 __register_prot_hook(sk);
3307 mutex_lock(&net->packet.sklist_lock);
3308 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3309 mutex_unlock(&net->packet.sklist_lock);
3311 preempt_disable();
3312 sock_prot_inuse_add(net, &packet_proto, 1);
3313 preempt_enable();
3315 return 0;
3316 out2:
3317 sk_free(sk);
3318 out:
3319 return err;
3320 }
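/*
 * Example: the three socket types packet_create() accepts; all of them
 * need CAP_NET_RAW in the socket's network namespace. A minimal
 * userspace sketch (editor's illustration).
 */
#if 0
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <sys/socket.h>

static void open_packet_sockets(void)
{
	/* SOCK_RAW: frames carry the link-layer header */
	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	/* SOCK_DGRAM: header stripped on rx, built by the kernel on tx */
	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	/* SOCK_PACKET: the obsolete pre-sockaddr_ll variant */
	int spkt = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));

	(void)raw; (void)dgram; (void)spkt;
}
#endif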
3323 * Pull a packet from our receive queue and hand it to the user.
3324 * If necessary we block.
3327 static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3330 struct sock *sk = sock->sk;
3331 struct sk_buff *skb;
3333 int vnet_hdr_len = 0;
3334 unsigned int origlen = 0;
3337 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3341 /* What error should we return now? EUNATTACH? */
3342 if (pkt_sk(sk)->ifindex < 0)
3346 if (flags & MSG_ERRQUEUE) {
3347 err = sock_recv_errqueue(sk, msg, len,
3348 SOL_PACKET, PACKET_TX_TIMESTAMP);
3353 * Call the generic datagram receiver. This handles all sorts
3354 * of horrible races and re-entrancy so we can forget about it
3355 * in the protocol layers.
3357 * Now it will return ENETDOWN if the device has just gone down,
3358 * but then it will block.
3361 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3363 /*
3364 * An error occurred, so return it. Because skb_recv_datagram()
3365 * handles the blocking we don't see, we don't have to worry
3366 * about blocking retries.
3367 */
3372 packet_rcv_try_clear_pressure(pkt_sk(sk));
3374 if (pkt_sk(sk)->has_vnet_hdr) {
3375 err = packet_rcv_vnet(msg, skb, &len);
3378 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3381 /* You lose any data beyond the buffer you gave. If this worries
3382 * a user program, it can always ask the device for its MTU.
3383 */
3388 msg->msg_flags |= MSG_TRUNC;
3391 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3395 if (sock->type != SOCK_PACKET) {
3396 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3398 /* Original length was stored in sockaddr_ll fields */
3399 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3400 sll->sll_family = AF_PACKET;
3401 sll->sll_protocol = skb->protocol;
3404 sock_recv_ts_and_drops(msg, sk, skb);
3406 if (msg->msg_name) {
3409 /* If the address length field is there to be filled
3410 * in, we fill it in now.
3412 if (sock->type == SOCK_PACKET) {
3413 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3414 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3415 copy_len = msg->msg_namelen;
3417 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3419 msg->msg_namelen = sll->sll_halen +
3420 offsetof(struct sockaddr_ll, sll_addr);
3421 copy_len = msg->msg_namelen;
3422 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3423 memset(msg->msg_name +
3424 offsetof(struct sockaddr_ll, sll_addr),
3425 0, sizeof(sll->sll_addr));
3426 msg->msg_namelen = sizeof(struct sockaddr_ll);
3429 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3432 if (pkt_sk(sk)->auxdata) {
3433 struct tpacket_auxdata aux;
3435 aux.tp_status = TP_STATUS_USER;
3436 if (skb->ip_summed == CHECKSUM_PARTIAL)
3437 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3438 else if (skb->pkt_type != PACKET_OUTGOING &&
3439 (skb->ip_summed == CHECKSUM_COMPLETE ||
3440 skb_csum_unnecessary(skb)))
3441 aux.tp_status |= TP_STATUS_CSUM_VALID;
3443 aux.tp_len = origlen;
3444 aux.tp_snaplen = skb->len;
3446 aux.tp_net = skb_network_offset(skb);
3447 if (skb_vlan_tag_present(skb)) {
3448 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3449 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3450 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3452 aux.tp_vlan_tci = 0;
3453 aux.tp_vlan_tpid = 0;
3455 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3459 * Free or return the buffer as appropriate. Again this
3460 * hides all the races and re-entrancy issues from us.
3462 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3465 skb_free_datagram(sk, skb);
3466 out:
3467 return err;
3468 }
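/*
 * Example: consuming the PACKET_AUXDATA control message emitted above.
 * A minimal userspace sketch (editor's illustration; recv_with_aux()
 * is a made-up helper, and PACKET_AUXDATA is assumed already enabled).
 */
#if 0
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_aux(int fd, void *buf, size_t len)
{
	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} ctl;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl.buf, .msg_controllen = sizeof(ctl.buf),
	};
	ssize_t n = recvmsg(fd, &msg, 0);
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata aux;

			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
			/* aux.tp_len is the wire length even if truncated */
		}
	return n;
}
#endif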
3470 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3473 struct net_device *dev;
3474 struct sock *sk = sock->sk;
3479 uaddr->sa_family = AF_PACKET;
3480 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3482 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3484 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3487 return sizeof(*uaddr);
3490 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3493 struct net_device *dev;
3494 struct sock *sk = sock->sk;
3495 struct packet_sock *po = pkt_sk(sk);
3496 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3501 sll->sll_family = AF_PACKET;
3502 sll->sll_ifindex = po->ifindex;
3503 sll->sll_protocol = po->num;
3504 sll->sll_pkttype = 0;
3506 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3508 sll->sll_hatype = dev->type;
3509 sll->sll_halen = dev->addr_len;
3510 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3512 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3517 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3520 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3524 case PACKET_MR_MULTICAST:
3525 if (i->alen != dev->addr_len)
3528 return dev_mc_add(dev, i->addr);
3530 return dev_mc_del(dev, i->addr);
3532 case PACKET_MR_PROMISC:
3533 return dev_set_promiscuity(dev, what);
3534 case PACKET_MR_ALLMULTI:
3535 return dev_set_allmulti(dev, what);
3536 case PACKET_MR_UNICAST:
3537 if (i->alen != dev->addr_len)
3540 return dev_uc_add(dev, i->addr);
3542 return dev_uc_del(dev, i->addr);
3550 static void packet_dev_mclist_delete(struct net_device *dev,
3551 struct packet_mclist **mlp)
3553 struct packet_mclist *ml;
3555 while ((ml = *mlp) != NULL) {
3556 if (ml->ifindex == dev->ifindex) {
3557 packet_dev_mc(dev, ml, -1);
3565 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3567 struct packet_sock *po = pkt_sk(sk);
3568 struct packet_mclist *ml, *i;
3569 struct net_device *dev;
3575 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3580 if (mreq->mr_alen > dev->addr_len)
3584 i = kmalloc(sizeof(*i), GFP_KERNEL);
3589 for (ml = po->mclist; ml; ml = ml->next) {
3590 if (ml->ifindex == mreq->mr_ifindex &&
3591 ml->type == mreq->mr_type &&
3592 ml->alen == mreq->mr_alen &&
3593 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3595 /* Free the new element ... */
3601 i->type = mreq->mr_type;
3602 i->ifindex = mreq->mr_ifindex;
3603 i->alen = mreq->mr_alen;
3604 memcpy(i->addr, mreq->mr_address, i->alen);
3605 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3607 i->next = po->mclist;
3609 err = packet_dev_mc(dev, i, 1);
3611 po->mclist = i->next;
3620 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3622 struct packet_mclist *ml, **mlp;
3626 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3627 if (ml->ifindex == mreq->mr_ifindex &&
3628 ml->type == mreq->mr_type &&
3629 ml->alen == mreq->mr_alen &&
3630 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3631 if (--ml->count == 0) {
3632 struct net_device *dev;
3634 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3636 packet_dev_mc(dev, ml, -1);
3646 static void packet_flush_mclist(struct sock *sk)
3648 struct packet_sock *po = pkt_sk(sk);
3649 struct packet_mclist *ml;
3655 while ((ml = po->mclist) != NULL) {
3656 struct net_device *dev;
3658 po->mclist = ml->next;
3659 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3661 packet_dev_mc(dev, ml, -1);
3662 kfree(ml);
3663 }
3664 rtnl_unlock();
3665 }
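/*
 * Example: the membership requests packet_mc_add() handles; for
 * PACKET_MR_PROMISC, packet_dev_mc() maps the request straight to
 * dev_set_promiscuity(). A minimal userspace sketch (editor's
 * illustration; enable_promisc() is a made-up helper).
 */
#if 0
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mr;

	memset(&mr, 0, sizeof(mr));
	mr.mr_ifindex = if_nametoindex(ifname);
	mr.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mr, sizeof(mr));
}
#endif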
3668 packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3669 unsigned int optlen)
3671 struct sock *sk = sock->sk;
3672 struct packet_sock *po = pkt_sk(sk);
3675 if (level != SOL_PACKET)
3676 return -ENOPROTOOPT;
3679 case PACKET_ADD_MEMBERSHIP:
3680 case PACKET_DROP_MEMBERSHIP:
3682 struct packet_mreq_max mreq;
3684 memset(&mreq, 0, sizeof(mreq));
3685 if (len < sizeof(struct packet_mreq))
3687 if (len > sizeof(mreq))
3689 if (copy_from_sockptr(&mreq, optval, len))
3691 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3693 if (optname == PACKET_ADD_MEMBERSHIP)
3694 ret = packet_mc_add(sk, &mreq);
3696 ret = packet_mc_drop(sk, &mreq);
3700 case PACKET_RX_RING:
3701 case PACKET_TX_RING:
3703 union tpacket_req_u req_u;
3707 switch (po->tp_version) {
3708 case TPACKET_V1:
3709 case TPACKET_V2:
3710 len = sizeof(req_u.req);
3711 break;
3712 case TPACKET_V3:
3713 default:
3714 len = sizeof(req_u.req3);
3715 break;
3716 }
3717 if (optlen < len) {
3718 ret = -EINVAL;
3719 } else {
3720 if (copy_from_sockptr(&req_u.req, optval, len))
3723 ret = packet_set_ring(sk, &req_u, 0,
3724 optname == PACKET_TX_RING);
3729 case PACKET_COPY_THRESH:
3733 if (optlen != sizeof(val))
3735 if (copy_from_sockptr(&val, optval, sizeof(val)))
3738 pkt_sk(sk)->copy_thresh = val;
3741 case PACKET_VERSION:
3745 if (optlen != sizeof(val))
3747 if (copy_from_sockptr(&val, optval, sizeof(val)))
3758 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3761 po->tp_version = val;
3767 case PACKET_RESERVE:
3771 if (optlen != sizeof(val))
3773 if (copy_from_sockptr(&val, optval, sizeof(val)))
3778 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3781 po->tp_reserve = val;
3791 if (optlen != sizeof(val))
3793 if (copy_from_sockptr(&val, optval, sizeof(val)))
3797 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3800 po->tp_loss = !!val;
3806 case PACKET_AUXDATA:
3810 if (optlen < sizeof(val))
3812 if (copy_from_sockptr(&val, optval, sizeof(val)))
3816 po->auxdata = !!val;
3820 case PACKET_ORIGDEV:
3824 if (optlen < sizeof(val))
3826 if (copy_from_sockptr(&val, optval, sizeof(val)))
3830 po->origdev = !!val;
3834 case PACKET_VNET_HDR:
3838 if (sock->type != SOCK_RAW)
3840 if (optlen < sizeof(val))
3842 if (copy_from_sockptr(&val, optval, sizeof(val)))
3846 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3849 po->has_vnet_hdr = !!val;
3855 case PACKET_TIMESTAMP:
3859 if (optlen != sizeof(val))
3861 if (copy_from_sockptr(&val, optval, sizeof(val)))
3864 po->tp_tstamp = val;
3871 if (optlen != sizeof(val))
3873 if (copy_from_sockptr(&val, optval, sizeof(val)))
3876 return fanout_add(sk, val & 0xffff, val >> 16);
3878 case PACKET_FANOUT_DATA:
3883 return fanout_set_data(po, optval, optlen);
3885 case PACKET_IGNORE_OUTGOING:
3889 if (optlen != sizeof(val))
3891 if (copy_from_sockptr(&val, optval, sizeof(val)))
3893 if (val < 0 || val > 1)
3896 po->prot_hook.ignore_outgoing = !!val;
3899 case PACKET_TX_HAS_OFF:
3903 if (optlen != sizeof(val))
3905 if (copy_from_sockptr(&val, optval, sizeof(val)))
3909 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3912 po->tp_tx_has_off = !!val;
3918 case PACKET_QDISC_BYPASS:
3922 if (optlen != sizeof(val))
3924 if (copy_from_sockptr(&val, optval, sizeof(val)))
3927 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3930 default:
3931 return -ENOPROTOOPT;
3932 }
3933 }
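/*
 * Example: the option ordering packet_setsockopt() enforces. Both
 * PACKET_VERSION and PACKET_RESERVE return -EBUSY once a ring exists,
 * so they must precede PACKET_RX_RING. A minimal userspace sketch
 * (editor's illustration; setup_v3_rx_ring() and its geometry are
 * made up).
 */
#if 0
#include <linux/if_packet.h>
#include <sys/socket.h>

static int setup_v3_rx_ring(int fd)
{
	int ver = TPACKET_V3;
	struct tpacket_req3 req = {
		.tp_block_size = 1 << 22,	/* must be page-aligned */
		.tp_block_nr = 64,
		.tp_frame_size = 1 << 11,	/* TPACKET_ALIGNMENT multiple */
		.tp_frame_nr = ((1 << 22) / (1 << 11)) * 64,
		.tp_retire_blk_tov = 60,	/* block timeout, ms */
	};

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}
#endif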
3935 static int packet_getsockopt(struct socket *sock, int level, int optname,
3936 char __user *optval, int __user *optlen)
3939 int val, lv = sizeof(val);
3940 struct sock *sk = sock->sk;
3941 struct packet_sock *po = pkt_sk(sk);
3943 union tpacket_stats_u st;
3944 struct tpacket_rollover_stats rstats;
3947 if (level != SOL_PACKET)
3948 return -ENOPROTOOPT;
3950 if (get_user(len, optlen))
3957 case PACKET_STATISTICS:
3958 spin_lock_bh(&sk->sk_receive_queue.lock);
3959 memcpy(&st, &po->stats, sizeof(st));
3960 memset(&po->stats, 0, sizeof(po->stats));
3961 spin_unlock_bh(&sk->sk_receive_queue.lock);
3962 drops = atomic_xchg(&po->tp_drops, 0);
3964 if (po->tp_version == TPACKET_V3) {
3965 lv = sizeof(struct tpacket_stats_v3);
3966 st.stats3.tp_drops = drops;
3967 st.stats3.tp_packets += drops;
3968 data = &st.stats3;
3969 } else {
3970 lv = sizeof(struct tpacket_stats);
3971 st.stats1.tp_drops = drops;
3972 st.stats1.tp_packets += drops;
3973 data = &st.stats1;
3974 }
3975 break;
3976 }
3977 case PACKET_AUXDATA:
3978 val = po->auxdata;
3979 break;
3980 case PACKET_ORIGDEV:
3981 val = po->origdev;
3982 break;
3983 case PACKET_VNET_HDR:
3984 val = po->has_vnet_hdr;
3985 break;
3986 case PACKET_VERSION:
3987 val = po->tp_version;
3988 break;
3989 case PACKET_HDRLEN:
3990 if (len > sizeof(int))
3991 len = sizeof(int);
3992 if (len < sizeof(int))
3993 return -EINVAL;
3994 if (copy_from_user(&val, optval, len))
3995 return -EFAULT;
3996 switch (val) {
3997 case TPACKET_V1:
3998 val = sizeof(struct tpacket_hdr);
3999 break;
4000 case TPACKET_V2:
4001 val = sizeof(struct tpacket2_hdr);
4002 break;
4003 case TPACKET_V3:
4004 val = sizeof(struct tpacket3_hdr);
4005 break;
4006 default:
4007 return -EINVAL;
4008 }
4009 break;
4010 case PACKET_RESERVE:
4011 val = po->tp_reserve;
4012 break;
4013 case PACKET_LOSS:
4014 val = po->tp_loss;
4015 break;
4016 case PACKET_TIMESTAMP:
4017 val = po->tp_tstamp;
4018 break;
4019 case PACKET_FANOUT:
4020 val = (po->fanout ?
4021 ((u32)po->fanout->id |
4022 ((u32)po->fanout->type << 16) |
4023 ((u32)po->fanout->flags << 24)) :
4024 0);
4025 break;
4026 case PACKET_IGNORE_OUTGOING:
4027 val = po->prot_hook.ignore_outgoing;
4028 break;
4029 case PACKET_ROLLOVER_STATS:
4030 if (!po->rollover)
4031 return -EINVAL;
4032 rstats.tp_all = atomic_long_read(&po->rollover->num);
4033 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4034 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4035 data = &rstats;
4036 lv = sizeof(rstats);
4037 break;
4038 case PACKET_TX_HAS_OFF:
4039 val = po->tp_tx_has_off;
4041 case PACKET_QDISC_BYPASS:
4042 val = packet_use_direct_xmit(po);
4045 return -ENOPROTOOPT;
4050 if (put_user(len, optlen))
4051 return -EFAULT;
4052 if (copy_to_user(optval, data, len))
4053 return -EFAULT;
4054 return 0;
4055 }
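/*
 * Example: reading PACKET_STATISTICS as served above; the kernel zeroes
 * its counters on every read, so each call returns a delta. A minimal
 * userspace sketch (editor's illustration; read_stats() is a made-up
 * helper, using the TPACKET_V1/V2 layout).
 */
#if 0
#include <linux/if_packet.h>
#include <sys/socket.h>

static int read_stats(int fd, struct tpacket_stats *st)
{
	socklen_t len = sizeof(*st);

	return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, st, &len);
}
#endif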
4057 static int packet_notifier(struct notifier_block *this,
4058 unsigned long msg, void *ptr)
4061 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4062 struct net *net = dev_net(dev);
4065 sk_for_each_rcu(sk, &net->packet.sklist) {
4066 struct packet_sock *po = pkt_sk(sk);
4069 case NETDEV_UNREGISTER:
4071 packet_dev_mclist_delete(dev, &po->mclist);
4075 if (dev->ifindex == po->ifindex) {
4076 spin_lock(&po->bind_lock);
4078 __unregister_prot_hook(sk, false);
4079 sk->sk_err = ENETDOWN;
4080 if (!sock_flag(sk, SOCK_DEAD))
4081 sk->sk_error_report(sk);
4083 if (msg == NETDEV_UNREGISTER) {
4084 packet_cached_dev_reset(po);
4086 if (po->prot_hook.dev)
4087 dev_put(po->prot_hook.dev);
4088 po->prot_hook.dev = NULL;
4090 spin_unlock(&po->bind_lock);
4094 if (dev->ifindex == po->ifindex) {
4095 spin_lock(&po->bind_lock);
4097 register_prot_hook(sk);
4098 spin_unlock(&po->bind_lock);
4108 static int packet_ioctl(struct socket *sock, unsigned int cmd,
4111 struct sock *sk = sock->sk;
4113 switch (cmd) {
4114 case SIOCOUTQ:
4115 {
4116 int amount = sk_wmem_alloc_get(sk);
4118 return put_user(amount, (int __user *)arg);
4119 }
4120 case SIOCINQ:
4121 {
4122 struct sk_buff *skb;
4123 int amount = 0;
4125 spin_lock_bh(&sk->sk_receive_queue.lock);
4126 skb = skb_peek(&sk->sk_receive_queue);
4127 if (skb)
4128 amount = skb->len;
4129 spin_unlock_bh(&sk->sk_receive_queue.lock);
4130 return put_user(amount, (int __user *)arg);
4131 }
4140 case SIOCGIFBRDADDR:
4141 case SIOCSIFBRDADDR:
4142 case SIOCGIFNETMASK:
4143 case SIOCSIFNETMASK:
4144 case SIOCGIFDSTADDR:
4145 case SIOCSIFDSTADDR:
4147 return inet_dgram_ops.ioctl(sock, cmd, arg);
4151 return -ENOIOCTLCMD;
4156 static __poll_t packet_poll(struct file *file, struct socket *sock,
4159 struct sock *sk = sock->sk;
4160 struct packet_sock *po = pkt_sk(sk);
4161 __poll_t mask = datagram_poll(file, sock, wait);
4163 spin_lock_bh(&sk->sk_receive_queue.lock);
4164 if (po->rx_ring.pg_vec) {
4165 if (!packet_previous_rx_frame(po, &po->rx_ring,
4166 TP_STATUS_KERNEL))
4167 mask |= EPOLLIN | EPOLLRDNORM;
4169 packet_rcv_try_clear_pressure(po);
4170 spin_unlock_bh(&sk->sk_receive_queue.lock);
4171 spin_lock_bh(&sk->sk_write_queue.lock);
4172 if (po->tx_ring.pg_vec) {
4173 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4174 mask |= EPOLLOUT | EPOLLWRNORM;
4176 spin_unlock_bh(&sk->sk_write_queue.lock);
4177 return mask;
4178 }
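/*
 * Example: the poll() pattern that pairs with packet_poll() above. A
 * minimal TPACKET_V2 userspace sketch (editor's illustration;
 * wait_for_rx_frame() is a made-up helper).
 */
#if 0
#include <linux/if_packet.h>
#include <poll.h>

static int wait_for_rx_frame(int fd, volatile struct tpacket2_hdr *next)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };

	while (!(next->tp_status & TP_STATUS_USER))
		if (poll(&pfd, 1, -1) < 0)
			return -1;
	return 0;
}
#endif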
4181 /* Dirty? Well, I still did not learn a better way to account
4182 * for user mmaps.
4183 */
4185 static void packet_mm_open(struct vm_area_struct *vma)
4187 struct file *file = vma->vm_file;
4188 struct socket *sock = file->private_data;
4189 struct sock *sk = sock->sk;
4192 atomic_inc(&pkt_sk(sk)->mapped);
4195 static void packet_mm_close(struct vm_area_struct *vma)
4197 struct file *file = vma->vm_file;
4198 struct socket *sock = file->private_data;
4199 struct sock *sk = sock->sk;
4202 atomic_dec(&pkt_sk(sk)->mapped);
4205 static const struct vm_operations_struct packet_mmap_ops = {
4206 .open = packet_mm_open,
4207 .close = packet_mm_close,
4210 static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4215 for (i = 0; i < len; i++) {
4216 if (likely(pg_vec[i].buffer)) {
4217 if (is_vmalloc_addr(pg_vec[i].buffer))
4218 vfree(pg_vec[i].buffer);
4220 free_pages((unsigned long)pg_vec[i].buffer,
4222 pg_vec[i].buffer = NULL;
4228 static char *alloc_one_pg_vec_page(unsigned long order)
4231 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4232 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4234 buffer = (char *) __get_free_pages(gfp_flags, order);
4238 /* __get_free_pages failed, fall back to vmalloc */
4239 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4243 /* vmalloc failed, lets dig into swap here */
4244 gfp_flags &= ~__GFP_NORETRY;
4245 buffer = (char *) __get_free_pages(gfp_flags, order);
4249 /* complete and utter failure */
4253 static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4255 unsigned int block_nr = req->tp_block_nr;
4259 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4260 if (unlikely(!pg_vec))
4263 for (i = 0; i < block_nr; i++) {
4264 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4265 if (unlikely(!pg_vec[i].buffer))
4266 goto out_free_pgvec;
4273 free_pg_vec(pg_vec, order, block_nr);
4278 static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4279 int closing, int tx_ring)
4281 struct pgv *pg_vec = NULL;
4282 struct packet_sock *po = pkt_sk(sk);
4283 unsigned long *rx_owner_map = NULL;
4284 int was_running, order = 0;
4285 struct packet_ring_buffer *rb;
4286 struct sk_buff_head *rb_queue;
4289 /* Local alias, added to keep code churn minimal */
4290 struct tpacket_req *req = &req_u->req;
4292 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4293 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4297 if (atomic_read(&po->mapped))
4299 if (packet_read_pending(rb))
4303 if (req->tp_block_nr) {
4304 unsigned int min_frame_size;
4306 /* Sanity tests and some calculations */
4308 if (unlikely(rb->pg_vec))
4311 switch (po->tp_version) {
4313 po->tp_hdrlen = TPACKET_HDRLEN;
4316 po->tp_hdrlen = TPACKET2_HDRLEN;
4319 po->tp_hdrlen = TPACKET3_HDRLEN;
4324 if (unlikely((int)req->tp_block_size <= 0))
4326 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4328 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4329 if (po->tp_version >= TPACKET_V3 &&
4330 req->tp_block_size <
4331 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4333 if (unlikely(req->tp_frame_size < min_frame_size))
4335 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4338 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4339 if (unlikely(rb->frames_per_block == 0))
4341 if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4343 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4348 order = get_order(req->tp_block_size);
4349 pg_vec = alloc_pg_vec(req, order);
4350 if (unlikely(!pg_vec))
4352 switch (po->tp_version) {
4354 /* Block transmit is not supported yet */
4356 init_prb_bdqc(po, rb, pg_vec, req_u);
4358 struct tpacket_req3 *req3 = &req_u->req3;
4360 if (req3->tp_retire_blk_tov ||
4361 req3->tp_sizeof_priv ||
4362 req3->tp_feature_req_word) {
4364 goto out_free_pg_vec;
4370 rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4371 GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4373 goto out_free_pg_vec;
4381 if (unlikely(req->tp_frame_nr))
4386 /* Detach socket from network */
4387 spin_lock(&po->bind_lock);
4388 was_running = po->running;
4392 __unregister_prot_hook(sk, false);
4394 spin_unlock(&po->bind_lock);
4399 mutex_lock(&po->pg_vec_lock);
4400 if (closing || atomic_read(&po->mapped) == 0) {
4402 spin_lock_bh(&rb_queue->lock);
4403 swap(rb->pg_vec, pg_vec);
4404 if (po->tp_version <= TPACKET_V2)
4405 swap(rb->rx_owner_map, rx_owner_map);
4406 rb->frame_max = (req->tp_frame_nr - 1);
4408 rb->frame_size = req->tp_frame_size;
4409 spin_unlock_bh(&rb_queue->lock);
4411 swap(rb->pg_vec_order, order);
4412 swap(rb->pg_vec_len, req->tp_block_nr);
4414 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4415 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4416 tpacket_rcv : packet_rcv;
4417 skb_queue_purge(rb_queue);
4418 if (atomic_read(&po->mapped))
4419 pr_err("packet_mmap: vma is busy: %d\n",
4420 atomic_read(&po->mapped));
4422 mutex_unlock(&po->pg_vec_lock);
4424 spin_lock(&po->bind_lock);
4427 register_prot_hook(sk);
4429 spin_unlock(&po->bind_lock);
4430 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4431 /* Because we don't support block-based V3 on tx-ring */
4433 prb_shutdown_retire_blk_timer(po, rb_queue);
4437 bitmap_free(rx_owner_map);
4439 free_pg_vec(pg_vec, order, req->tp_block_nr);
4440 }
4441 out:
4442 return err;
4443 }
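/*
 * Example: the ring geometry packet_set_ring() checks before accepting
 * a request. A minimal userspace sketch (editor's illustration;
 * ring_geometry_ok() is a made-up helper; the TPACKET_HDRLEN bound
 * assumes TPACKET_V1 with no PACKET_RESERVE slack).
 */
#if 0
#include <linux/if_packet.h>
#include <unistd.h>

static int ring_geometry_ok(const struct tpacket_req *req)
{
	long page = sysconf(_SC_PAGESIZE);

	return req->tp_block_size % page == 0 &&
	       req->tp_frame_size % TPACKET_ALIGNMENT == 0 &&
	       req->tp_frame_size >= TPACKET_HDRLEN &&
	       (req->tp_block_size / req->tp_frame_size) *
			req->tp_block_nr == req->tp_frame_nr;
}
#endif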
4444 static int packet_mmap(struct file *file, struct socket *sock,
4445 struct vm_area_struct *vma)
4447 struct sock *sk = sock->sk;
4448 struct packet_sock *po = pkt_sk(sk);
4449 unsigned long size, expected_size;
4450 struct packet_ring_buffer *rb;
4451 unsigned long start;
4458 mutex_lock(&po->pg_vec_lock);
4461 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4463 expected_size += rb->pg_vec_len
4469 if (expected_size == 0)
4472 size = vma->vm_end - vma->vm_start;
4473 if (size != expected_size)
4476 start = vma->vm_start;
4477 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4478 if (rb->pg_vec == NULL)
4481 for (i = 0; i < rb->pg_vec_len; i++) {
4483 void *kaddr = rb->pg_vec[i].buffer;
4486 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4487 page = pgv_to_page(kaddr);
4488 err = vm_insert_page(vma, start, page);
4497 atomic_inc(&po->mapped);
4498 vma->vm_ops = &packet_mmap_ops;
4502 mutex_unlock(&po->pg_vec_lock);
4503 return err;
4504 }
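/*
 * Example: mapping the rings packet_mmap() exports. The mapping length
 * must equal the rx plus tx ring sizes combined, rx first. A minimal
 * userspace sketch (editor's illustration; map_rings() is a made-up
 * helper).
 */
#if 0
#include <stddef.h>
#include <sys/mman.h>

static void *map_rings(int fd, size_t rx_bytes, size_t tx_bytes)
{
	return mmap(NULL, rx_bytes + tx_bytes, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
}
#endif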
4506 static const struct proto_ops packet_ops_spkt = {
4507 .family = PF_PACKET,
4508 .owner = THIS_MODULE,
4509 .release = packet_release,
4510 .bind = packet_bind_spkt,
4511 .connect = sock_no_connect,
4512 .socketpair = sock_no_socketpair,
4513 .accept = sock_no_accept,
4514 .getname = packet_getname_spkt,
4515 .poll = datagram_poll,
4516 .ioctl = packet_ioctl,
4517 .gettstamp = sock_gettstamp,
4518 .listen = sock_no_listen,
4519 .shutdown = sock_no_shutdown,
4520 .sendmsg = packet_sendmsg_spkt,
4521 .recvmsg = packet_recvmsg,
4522 .mmap = sock_no_mmap,
4523 .sendpage = sock_no_sendpage,
4526 static const struct proto_ops packet_ops = {
4527 .family = PF_PACKET,
4528 .owner = THIS_MODULE,
4529 .release = packet_release,
4530 .bind = packet_bind,
4531 .connect = sock_no_connect,
4532 .socketpair = sock_no_socketpair,
4533 .accept = sock_no_accept,
4534 .getname = packet_getname,
4535 .poll = packet_poll,
4536 .ioctl = packet_ioctl,
4537 .gettstamp = sock_gettstamp,
4538 .listen = sock_no_listen,
4539 .shutdown = sock_no_shutdown,
4540 .setsockopt = packet_setsockopt,
4541 .getsockopt = packet_getsockopt,
4542 .sendmsg = packet_sendmsg,
4543 .recvmsg = packet_recvmsg,
4544 .mmap = packet_mmap,
4545 .sendpage = sock_no_sendpage,
4548 static const struct net_proto_family packet_family_ops = {
4549 .family = PF_PACKET,
4550 .create = packet_create,
4551 .owner = THIS_MODULE,
4554 static struct notifier_block packet_netdev_notifier = {
4555 .notifier_call = packet_notifier,
4558 #ifdef CONFIG_PROC_FS
4560 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4561 __acquires(RCU)
4562 {
4563 struct net *net = seq_file_net(seq);
4565 rcu_read_lock();
4566 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4567 }
4569 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4571 struct net *net = seq_file_net(seq);
4572 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4575 static void packet_seq_stop(struct seq_file *seq, void *v)
4576 __releases(RCU)
4577 {
4578 rcu_read_unlock();
4579 }
4581 static int packet_seq_show(struct seq_file *seq, void *v)
4583 if (v == SEQ_START_TOKEN)
4584 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4586 struct sock *s = sk_entry(v);
4587 const struct packet_sock *po = pkt_sk(s);
4590 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4592 refcount_read(&s->sk_refcnt),
4597 atomic_read(&s->sk_rmem_alloc),
4598 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4605 static const struct seq_operations packet_seq_ops = {
4606 .start = packet_seq_start,
4607 .next = packet_seq_next,
4608 .stop = packet_seq_stop,
4609 .show = packet_seq_show,
4613 static int __net_init packet_net_init(struct net *net)
4615 mutex_init(&net->packet.sklist_lock);
4616 INIT_HLIST_HEAD(&net->packet.sklist);
4618 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4619 sizeof(struct seq_net_private)))
4625 static void __net_exit packet_net_exit(struct net *net)
4627 remove_proc_entry("packet", net->proc_net);
4628 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4631 static struct pernet_operations packet_net_ops = {
4632 .init = packet_net_init,
4633 .exit = packet_net_exit,
4637 static void __exit packet_exit(void)
4639 unregister_netdevice_notifier(&packet_netdev_notifier);
4640 unregister_pernet_subsys(&packet_net_ops);
4641 sock_unregister(PF_PACKET);
4642 proto_unregister(&packet_proto);
4645 static int __init packet_init(void)
4649 rc = proto_register(&packet_proto, 0);
4652 rc = sock_register(&packet_family_ops);
4655 rc = register_pernet_subsys(&packet_net_ops);
4658 rc = register_netdevice_notifier(&packet_netdev_notifier);
4665 unregister_pernet_subsys(&packet_net_ops);
4667 sock_unregister(PF_PACKET);
4669 proto_unregister(&packet_proto);
4674 module_init(packet_init);
4675 module_exit(packet_exit);
4676 MODULE_LICENSE("GPL");
4677 MODULE_ALIAS_NETPROTO(PF_PACKET);