1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  TUN - Universal TUN/TAP device driver.
4  *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
5  *
6  *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
7  */
8
9 /*
10  *  Changes:
11  *
12  *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
13  *    Add TUNSETLINK ioctl to set the link encapsulation
14  *
15  *  Mark Smith <markzzzsmith@yahoo.com.au>
16  *    Use eth_random_addr() for tap MAC address.
17  *
18  *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
19  *    Fixes in packet dropping, queue length setting and queue wakeup.
20  *    Increased default tx queue length.
21  *    Added ethtool API.
22  *    Minor cleanups
23  *
24  *  Daniel Podlejski <underley@underley.eu.org>
25  *    Modifications for 2.3.99-pre5 kernel.
26  */
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30 #define DRV_NAME        "tun"
31 #define DRV_VERSION     "1.6"
32 #define DRV_DESCRIPTION "Universal TUN/TAP device driver"
33 #define DRV_COPYRIGHT   "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
34
35 #include <linux/module.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
38 #include <linux/sched/signal.h>
39 #include <linux/major.h>
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/miscdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/compat.h>
51 #include <linux/if.h>
52 #include <linux/if_arp.h>
53 #include <linux/if_ether.h>
54 #include <linux/if_tun.h>
55 #include <linux/if_vlan.h>
56 #include <linux/crc32.h>
57 #include <linux/nsproxy.h>
58 #include <linux/virtio_net.h>
59 #include <linux/rcupdate.h>
60 #include <net/net_namespace.h>
61 #include <net/netns/generic.h>
62 #include <net/rtnetlink.h>
63 #include <net/sock.h>
64 #include <net/xdp.h>
65 #include <linux/seq_file.h>
66 #include <linux/uio.h>
67 #include <linux/skb_array.h>
68 #include <linux/bpf.h>
69 #include <linux/bpf_trace.h>
70 #include <linux/mutex.h>
71
72 #include <linux/uaccess.h>
73 #include <linux/proc_fs.h>
74
75 static void tun_default_link_ksettings(struct net_device *dev,
76                                        struct ethtool_link_ksettings *cmd);
77
78 /* Uncomment to enable debugging */
79 /* #define TUN_DEBUG 1 */
80
81 #ifdef TUN_DEBUG
82 static int debug;
83
84 #define tun_debug(level, tun, fmt, args...)                     \
85 do {                                                            \
86         if (tun->debug)                                         \
87                 netdev_printk(level, tun->dev, fmt, ##args);    \
88 } while (0)
89 #define DBG1(level, fmt, args...)                               \
90 do {                                                            \
91         if (debug == 2)                                         \
92                 printk(level fmt, ##args);                      \
93 } while (0)
94 #else
95 #define tun_debug(level, tun, fmt, args...)                     \
96 do {                                                            \
97         if (0)                                                  \
98                 netdev_printk(level, tun->dev, fmt, ##args);    \
99 } while (0)
100 #define DBG1(level, fmt, args...)                               \
101 do {                                                            \
102         if (0)                                                  \
103                 printk(level fmt, ##args);                      \
104 } while (0)
105 #endif
106
107 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
108
109 /* TUN device flags */
110
111 /* IFF_ATTACH_QUEUE is never stored in device flags, so in
112  * tfile->flags it is overloaded to mean fasync.
113  */
114 #define TUN_FASYNC      IFF_ATTACH_QUEUE
115 /* High bits in flags field are unused. */
116 #define TUN_VNET_LE     0x80000000
117 #define TUN_VNET_BE     0x40000000
118
119 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
120                       IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
121
122 #define GOODCOPY_LEN 128
123
124 #define FLT_EXACT_COUNT 8
125 struct tap_filter {
126         unsigned int    count;    /* Number of addrs. Zero means disabled */
127         u32             mask[2];  /* Mask of the hashed addrs */
128         unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
129 };
130
131 /* MAX_TAP_QUEUES 256 is chosen to allow the number of rx/tx queues to
132  * equal the maximum number of VCPUs in a guest. */
133 #define MAX_TAP_QUEUES 256
134 #define MAX_TAP_FLOWS  4096
135
136 #define TUN_FLOW_EXPIRE (3 * HZ)
137
138 struct tun_pcpu_stats {
139         u64 rx_packets;
140         u64 rx_bytes;
141         u64 tx_packets;
142         u64 tx_bytes;
143         struct u64_stats_sync syncp;
144         u32 rx_dropped;
145         u32 tx_dropped;
146         u32 rx_frame_errors;
147 };
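
/* These u64 counters are updated locklessly per CPU under the syncp
 * sequence counter and folded together in tun_net_get_stats64() below.
 * A minimal sketch of the writer side, assuming a caller that has just
 * received one packet (illustrative only; the real updates happen in
 * the rx/tx paths of this driver):
 *
 *	struct tun_pcpu_stats *stats = get_cpu_ptr(tun->pcpu_stats);
 *
 *	u64_stats_update_begin(&stats->syncp);
 *	stats->rx_packets++;
 *	stats->rx_bytes += skb->len;
 *	u64_stats_update_end(&stats->syncp);
 *	put_cpu_ptr(tun->pcpu_stats);
 */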
148
149 /* A tun_file connects an open character device to a tuntap netdevice. It
150  * also contains all socket related structures (except sock_fprog and tap_filter)
151  * to serve as one transmit queue for the tuntap device. The sock_fprog and
152  * tap_filter are kept in tun_struct since they are used for filtering on the
153  * netdevice, not for a specific queue (at least I didn't see a requirement
154  * for that).
155  *
156  * RCU usage:
157  * The tun_file and tun_struct are loosely coupled, the pointer from one to the
158  * other can only be read while rcu_read_lock or rtnl_lock is held.
159  */
160 struct tun_file {
161         struct sock sk;
162         struct socket socket;
163         struct tun_struct __rcu *tun;
164         struct fasync_struct *fasync;
165         /* only used for fasync */
166         unsigned int flags;
167         union {
168                 u16 queue_index;
169                 unsigned int ifindex;
170         };
171         struct napi_struct napi;
172         bool napi_enabled;
173         bool napi_frags_enabled;
174         struct mutex napi_mutex;        /* Protects access to the above napi */
175         struct list_head next;
176         struct tun_struct *detached;
177         struct ptr_ring tx_ring;
178         struct xdp_rxq_info xdp_rxq;
179 };
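
/* A minimal sketch of the reader side of the RCU coupling described
 * above, assuming the caller only needs the tun_struct inside the read
 * section (tun_get()/tun_put() below take a dev refcount for anything
 * longer-lived):
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		...use tun; no sleeping inside the read section...
 *	rcu_read_unlock();
 */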
180
181 struct tun_page {
182         struct page *page;
183         int count;
184 };
185
186 struct tun_flow_entry {
187         struct hlist_node hash_link;
188         struct rcu_head rcu;
189         struct tun_struct *tun;
190
191         u32 rxhash;
192         u32 rps_rxhash;
193         int queue_index;
194         unsigned long updated ____cacheline_aligned_in_smp;
195 };
196
197 #define TUN_NUM_FLOW_ENTRIES 1024
198 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
199
200 struct tun_prog {
201         struct rcu_head rcu;
202         struct bpf_prog *prog;
203 };
204
205 /* Since the socket was moved to tun_file, to preserve the behavior of a
206  * persistent device, the socket filter, sndbuf and vnet header size are
207  * restored when a file is attached to a persistent device.
208  */
209 struct tun_struct {
210         struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
211         unsigned int            numqueues;
212         unsigned int            flags;
213         kuid_t                  owner;
214         kgid_t                  group;
215
216         struct net_device       *dev;
217         netdev_features_t       set_features;
218 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
219                           NETIF_F_TSO6)
220
221         int                     align;
222         int                     vnet_hdr_sz;
223         int                     sndbuf;
224         struct tap_filter       txflt;
225         struct sock_fprog       fprog;
226         /* protected by rtnl lock */
227         bool                    filter_attached;
228 #ifdef TUN_DEBUG
229         int debug;
230 #endif
231         spinlock_t lock;
232         struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
233         struct timer_list flow_gc_timer;
234         unsigned long ageing_time;
235         unsigned int numdisabled;
236         struct list_head disabled;
237         void *security;
238         u32 flow_count;
239         u32 rx_batched;
240         struct tun_pcpu_stats __percpu *pcpu_stats;
241         struct bpf_prog __rcu *xdp_prog;
242         struct tun_prog __rcu *steering_prog;
243         struct tun_prog __rcu *filter_prog;
244         struct ethtool_link_ksettings link_ksettings;
245 };
246
247 struct veth {
248         __be16 h_vlan_proto;
249         __be16 h_vlan_TCI;
250 };
251
252 bool tun_is_xdp_frame(void *ptr)
253 {
254         return (unsigned long)ptr & TUN_XDP_FLAG;
255 }
256 EXPORT_SYMBOL(tun_is_xdp_frame);
257
258 void *tun_xdp_to_ptr(void *ptr)
259 {
260         return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
261 }
262 EXPORT_SYMBOL(tun_xdp_to_ptr);
263
264 void *tun_ptr_to_xdp(void *ptr)
265 {
266         return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
267 }
268 EXPORT_SYMBOL(tun_ptr_to_xdp);
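
/* The three helpers above implement a simple pointer-tagging scheme:
 * entries in tfile->tx_ring are either sk_buff or xdp_frame pointers,
 * with TUN_XDP_FLAG set in the low bit for the latter. A sketch of the
 * round trip, assuming xdpf points to a valid xdp_frame:
 *
 *	void *ptr = tun_xdp_to_ptr(xdpf);	// tag the pointer
 *	if (tun_is_xdp_frame(ptr))		// true only when tagged
 *		xdpf = tun_ptr_to_xdp(ptr);	// strip the tag again
 *
 * This works because the frames are at least word-aligned, so the low
 * bit of a genuine pointer is always zero.
 */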
269
270 static int tun_napi_receive(struct napi_struct *napi, int budget)
271 {
272         struct tun_file *tfile = container_of(napi, struct tun_file, napi);
273         struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
274         struct sk_buff_head process_queue;
275         struct sk_buff *skb;
276         int received = 0;
277
278         __skb_queue_head_init(&process_queue);
279
280         spin_lock(&queue->lock);
281         skb_queue_splice_tail_init(queue, &process_queue);
282         spin_unlock(&queue->lock);
283
284         while (received < budget && (skb = __skb_dequeue(&process_queue))) {
285                 napi_gro_receive(napi, skb);
286                 ++received;
287         }
288
289         if (!skb_queue_empty(&process_queue)) {
290                 spin_lock(&queue->lock);
291                 skb_queue_splice(&process_queue, queue);
292                 spin_unlock(&queue->lock);
293         }
294
295         return received;
296 }
297
298 static int tun_napi_poll(struct napi_struct *napi, int budget)
299 {
300         unsigned int received;
301
302         received = tun_napi_receive(napi, budget);
303
304         if (received < budget)
305                 napi_complete_done(napi, received);
306
307         return received;
308 }
309
310 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
311                           bool napi_en, bool napi_frags)
312 {
313         tfile->napi_enabled = napi_en;
314         tfile->napi_frags_enabled = napi_en && napi_frags;
315         if (napi_en) {
316                 netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
317                                NAPI_POLL_WEIGHT);
318                 napi_enable(&tfile->napi);
319         }
320 }
321
322 static void tun_napi_disable(struct tun_file *tfile)
323 {
324         if (tfile->napi_enabled)
325                 napi_disable(&tfile->napi);
326 }
327
328 static void tun_napi_del(struct tun_file *tfile)
329 {
330         if (tfile->napi_enabled)
331                 netif_napi_del(&tfile->napi);
332 }
333
334 static bool tun_napi_frags_enabled(const struct tun_file *tfile)
335 {
336         return tfile->napi_frags_enabled;
337 }
338
339 #ifdef CONFIG_TUN_VNET_CROSS_LE
340 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
341 {
342         return tun->flags & TUN_VNET_BE ? false :
343                 virtio_legacy_is_little_endian();
344 }
345
346 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
347 {
348         int be = !!(tun->flags & TUN_VNET_BE);
349
350         if (put_user(be, argp))
351                 return -EFAULT;
352
353         return 0;
354 }
355
356 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
357 {
358         int be;
359
360         if (get_user(be, argp))
361                 return -EFAULT;
362
363         if (be)
364                 tun->flags |= TUN_VNET_BE;
365         else
366                 tun->flags &= ~TUN_VNET_BE;
367
368         return 0;
369 }
370 #else
371 static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
372 {
373         return virtio_legacy_is_little_endian();
374 }
375
376 static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
377 {
378         return -EINVAL;
379 }
380
381 static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
382 {
383         return -EINVAL;
384 }
385 #endif /* CONFIG_TUN_VNET_CROSS_LE */
386
387 static inline bool tun_is_little_endian(struct tun_struct *tun)
388 {
389         return tun->flags & TUN_VNET_LE ||
390                 tun_legacy_is_little_endian(tun);
391 }
392
393 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
394 {
395         return __virtio16_to_cpu(tun_is_little_endian(tun), val);
396 }
397
398 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
399 {
400         return __cpu_to_virtio16(tun_is_little_endian(tun), val);
401 }
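
/* Illustrative use of the two conversion helpers above, assuming a
 * struct virtio_net_hdr named gso read from or written to userspace
 * (the real callers are in the rx/tx paths of this driver):
 *
 *	u16 hdr_len = tun16_to_cpu(tun, gso.hdr_len);	// vnet -> host
 *	gso.hdr_len = cpu_to_tun16(tun, hdr_len);	// host -> vnet
 *
 * Whether the vnet byte order is little endian depends on TUN_VNET_LE
 * and, with CONFIG_TUN_VNET_CROSS_LE, on TUN_VNET_BE as handled above.
 */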
402
403 static inline u32 tun_hashfn(u32 rxhash)
404 {
405         return rxhash & TUN_MASK_FLOW_ENTRIES;
406 }
407
408 static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
409 {
410         struct tun_flow_entry *e;
411
412         hlist_for_each_entry_rcu(e, head, hash_link) {
413                 if (e->rxhash == rxhash)
414                         return e;
415         }
416         return NULL;
417 }
418
419 static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
420                                               struct hlist_head *head,
421                                               u32 rxhash, u16 queue_index)
422 {
423         struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
424
425         if (e) {
426                 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
427                           rxhash, queue_index);
428                 e->updated = jiffies;
429                 e->rxhash = rxhash;
430                 e->rps_rxhash = 0;
431                 e->queue_index = queue_index;
432                 e->tun = tun;
433                 hlist_add_head_rcu(&e->hash_link, head);
434                 ++tun->flow_count;
435         }
436         return e;
437 }
438
439 static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
440 {
441         tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
442                   e->rxhash, e->queue_index);
443         hlist_del_rcu(&e->hash_link);
444         kfree_rcu(e, rcu);
445         --tun->flow_count;
446 }
447
448 static void tun_flow_flush(struct tun_struct *tun)
449 {
450         int i;
451
452         spin_lock_bh(&tun->lock);
453         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
454                 struct tun_flow_entry *e;
455                 struct hlist_node *n;
456
457                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
458                         tun_flow_delete(tun, e);
459         }
460         spin_unlock_bh(&tun->lock);
461 }
462
463 static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
464 {
465         int i;
466
467         spin_lock_bh(&tun->lock);
468         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
469                 struct tun_flow_entry *e;
470                 struct hlist_node *n;
471
472                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
473                         if (e->queue_index == queue_index)
474                                 tun_flow_delete(tun, e);
475                 }
476         }
477         spin_unlock_bh(&tun->lock);
478 }
479
480 static void tun_flow_cleanup(struct timer_list *t)
481 {
482         struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
483         unsigned long delay = tun->ageing_time;
484         unsigned long next_timer = jiffies + delay;
485         unsigned long count = 0;
486         int i;
487
488         tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
489
490         spin_lock(&tun->lock);
491         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
492                 struct tun_flow_entry *e;
493                 struct hlist_node *n;
494
495                 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
496                         unsigned long this_timer;
497
498                         this_timer = e->updated + delay;
499                         if (time_before_eq(this_timer, jiffies)) {
500                                 tun_flow_delete(tun, e);
501                                 continue;
502                         }
503                         count++;
504                         if (time_before(this_timer, next_timer))
505                                 next_timer = this_timer;
506                 }
507         }
508
509         if (count)
510                 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
511         spin_unlock(&tun->lock);
512 }
513
514 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
515                             struct tun_file *tfile)
516 {
517         struct hlist_head *head;
518         struct tun_flow_entry *e;
519         unsigned long delay = tun->ageing_time;
520         u16 queue_index = tfile->queue_index;
521
522         head = &tun->flows[tun_hashfn(rxhash)];
523
524         rcu_read_lock();
525
526         e = tun_flow_find(head, rxhash);
527         if (likely(e)) {
528                 /* TODO: keep queueing to old queue until it's empty? */
529                 if (e->queue_index != queue_index)
530                         e->queue_index = queue_index;
531                 if (e->updated != jiffies)
532                         e->updated = jiffies;
533                 sock_rps_record_flow_hash(e->rps_rxhash);
534         } else {
535                 spin_lock_bh(&tun->lock);
536                 if (!tun_flow_find(head, rxhash) &&
537                     tun->flow_count < MAX_TAP_FLOWS)
538                         tun_flow_create(tun, head, rxhash, queue_index);
539
540                 if (!timer_pending(&tun->flow_gc_timer))
541                         mod_timer(&tun->flow_gc_timer,
542                                   round_jiffies_up(jiffies + delay));
543                 spin_unlock_bh(&tun->lock);
544         }
545
546         rcu_read_unlock();
547 }
548
549 /*
550  * Save the hash received in the stack receive path and update the
551  * flow_hash table accordingly.
552  */
553 static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
554 {
555         if (unlikely(e->rps_rxhash != hash))
556                 e->rps_rxhash = hash;
557 }
558
559 /* We try to identify a flow through its rxhash. The reason that
560  * we do not check rxq no. is that some cards (e.g. 82599) choose
561  * the rxq based on the txq where the last packet of the flow came
562  * from. As the userspace application moves between processors, we may
563  * get a different rxq no. here.
564  */
565 static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
566 {
567         struct tun_flow_entry *e;
568         u32 txq = 0;
569         u32 numqueues = 0;
570
571         numqueues = READ_ONCE(tun->numqueues);
572
573         txq = __skb_get_hash_symmetric(skb);
574         e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
575         if (e) {
576                 tun_flow_save_rps_rxhash(e, txq);
577                 txq = e->queue_index;
578         } else {
579                 /* use multiply and shift instead of expensive divide */
580                 txq = ((u64)txq * numqueues) >> 32;
581         }
582
583         return txq;
584 }
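
/* The multiply-and-shift in tun_automq_select_queue() maps a 32-bit
 * hash uniformly onto [0, numqueues) without a divide: txq / 2^32 is a
 * fraction in [0, 1), and scaling it by numqueues picks the queue. For
 * example, with numqueues == 4, hashes 0x00000000..0x3fffffff map to
 * queue 0, 0x40000000..0x7fffffff to queue 1, and so on.
 */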
585
586 static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
587 {
588         struct tun_prog *prog;
589         u32 numqueues;
590         u16 ret = 0;
591
592         numqueues = READ_ONCE(tun->numqueues);
593         if (!numqueues)
594                 return 0;
595
596         prog = rcu_dereference(tun->steering_prog);
597         if (prog)
598                 ret = bpf_prog_run_clear_cb(prog->prog, skb);
599
600         return ret % numqueues;
601 }
602
603 static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
604                             struct net_device *sb_dev)
605 {
606         struct tun_struct *tun = netdev_priv(dev);
607         u16 ret;
608
609         rcu_read_lock();
610         if (rcu_dereference(tun->steering_prog))
611                 ret = tun_ebpf_select_queue(tun, skb);
612         else
613                 ret = tun_automq_select_queue(tun, skb);
614         rcu_read_unlock();
615
616         return ret;
617 }
618
619 static inline bool tun_not_capable(struct tun_struct *tun)
620 {
621         const struct cred *cred = current_cred();
622         struct net *net = dev_net(tun->dev);
623
624         return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
625                   (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
626                 !ns_capable(net->user_ns, CAP_NET_ADMIN);
627 }
628
629 static void tun_set_real_num_queues(struct tun_struct *tun)
630 {
631         netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
632         netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
633 }
634
635 static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
636 {
637         tfile->detached = tun;
638         list_add_tail(&tfile->next, &tun->disabled);
639         ++tun->numdisabled;
640 }
641
642 static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
643 {
644         struct tun_struct *tun = tfile->detached;
645
646         tfile->detached = NULL;
647         list_del_init(&tfile->next);
648         --tun->numdisabled;
649         return tun;
650 }
651
652 void tun_ptr_free(void *ptr)
653 {
654         if (!ptr)
655                 return;
656         if (tun_is_xdp_frame(ptr)) {
657                 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
658
659                 xdp_return_frame(xdpf);
660         } else {
661                 __skb_array_destroy_skb(ptr);
662         }
663 }
664 EXPORT_SYMBOL_GPL(tun_ptr_free);
665
666 static void tun_queue_purge(struct tun_file *tfile)
667 {
668         void *ptr;
669
670         while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
671                 tun_ptr_free(ptr);
672
673         skb_queue_purge(&tfile->sk.sk_write_queue);
674         skb_queue_purge(&tfile->sk.sk_error_queue);
675 }
676
677 static void __tun_detach(struct tun_file *tfile, bool clean)
678 {
679         struct tun_file *ntfile;
680         struct tun_struct *tun;
681
682         tun = rtnl_dereference(tfile->tun);
683
684         if (tun && clean) {
685                 tun_napi_disable(tfile);
686                 tun_napi_del(tfile);
687         }
688
689         if (tun && !tfile->detached) {
690                 u16 index = tfile->queue_index;
691                 BUG_ON(index >= tun->numqueues);
692
693                 rcu_assign_pointer(tun->tfiles[index],
694                                    tun->tfiles[tun->numqueues - 1]);
695                 ntfile = rtnl_dereference(tun->tfiles[index]);
696                 ntfile->queue_index = index;
697                 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
698                                    NULL);
699
700                 --tun->numqueues;
701                 if (clean) {
702                         RCU_INIT_POINTER(tfile->tun, NULL);
703                         sock_put(&tfile->sk);
704                 } else
705                         tun_disable_queue(tun, tfile);
706
707                 synchronize_net();
708                 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
709                 /* Drop read queue */
710                 tun_queue_purge(tfile);
711                 tun_set_real_num_queues(tun);
712         } else if (tfile->detached && clean) {
713                 tun = tun_enable_queue(tfile);
714                 sock_put(&tfile->sk);
715         }
716
717         if (clean) {
718                 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
719                         netif_carrier_off(tun->dev);
720
721                         if (!(tun->flags & IFF_PERSIST) &&
722                             tun->dev->reg_state == NETREG_REGISTERED)
723                                 unregister_netdevice(tun->dev);
724                 }
725                 if (tun)
726                         xdp_rxq_info_unreg(&tfile->xdp_rxq);
727                 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
728                 sock_put(&tfile->sk);
729         }
730 }
731
732 static void tun_detach(struct tun_file *tfile, bool clean)
733 {
734         struct tun_struct *tun;
735         struct net_device *dev;
736
737         rtnl_lock();
738         tun = rtnl_dereference(tfile->tun);
739         dev = tun ? tun->dev : NULL;
740         __tun_detach(tfile, clean);
741         if (dev)
742                 netdev_state_change(dev);
743         rtnl_unlock();
744 }
745
746 static void tun_detach_all(struct net_device *dev)
747 {
748         struct tun_struct *tun = netdev_priv(dev);
749         struct tun_file *tfile, *tmp;
750         int i, n = tun->numqueues;
751
752         for (i = 0; i < n; i++) {
753                 tfile = rtnl_dereference(tun->tfiles[i]);
754                 BUG_ON(!tfile);
755                 tun_napi_disable(tfile);
756                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
757                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
758                 RCU_INIT_POINTER(tfile->tun, NULL);
759                 --tun->numqueues;
760         }
761         list_for_each_entry(tfile, &tun->disabled, next) {
762                 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
763                 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
764                 RCU_INIT_POINTER(tfile->tun, NULL);
765         }
766         BUG_ON(tun->numqueues != 0);
767
768         synchronize_net();
769         for (i = 0; i < n; i++) {
770                 tfile = rtnl_dereference(tun->tfiles[i]);
771                 tun_napi_del(tfile);
772                 /* Drop read queue */
773                 tun_queue_purge(tfile);
774                 xdp_rxq_info_unreg(&tfile->xdp_rxq);
775                 sock_put(&tfile->sk);
776         }
777         list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
778                 tun_enable_queue(tfile);
779                 tun_queue_purge(tfile);
780                 xdp_rxq_info_unreg(&tfile->xdp_rxq);
781                 sock_put(&tfile->sk);
782         }
783         BUG_ON(tun->numdisabled != 0);
784
785         if (tun->flags & IFF_PERSIST)
786                 module_put(THIS_MODULE);
787 }
788
789 static int tun_attach(struct tun_struct *tun, struct file *file,
790                       bool skip_filter, bool napi, bool napi_frags)
791 {
792         struct tun_file *tfile = file->private_data;
793         struct net_device *dev = tun->dev;
794         int err;
795
796         err = security_tun_dev_attach(tfile->socket.sk, tun->security);
797         if (err < 0)
798                 goto out;
799
800         err = -EINVAL;
801         if (rtnl_dereference(tfile->tun) && !tfile->detached)
802                 goto out;
803
804         err = -EBUSY;
805         if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
806                 goto out;
807
808         err = -E2BIG;
809         if (!tfile->detached &&
810             tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
811                 goto out;
812
813         err = 0;
814
815         /* Re-attach the filter to persist device */
816         if (!skip_filter && (tun->filter_attached == true)) {
817                 lock_sock(tfile->socket.sk);
818                 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
819                 release_sock(tfile->socket.sk);
820                 if (!err)
821                         goto out;
822         }
823
824         if (!tfile->detached &&
825             ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
826                             GFP_KERNEL, tun_ptr_free)) {
827                 err = -ENOMEM;
828                 goto out;
829         }
830
831         tfile->queue_index = tun->numqueues;
832         tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
833
834         if (tfile->detached) {
835                 /* Re-attach detached tfile, updating XDP queue_index */
836                 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
837
838                 if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
839                         tfile->xdp_rxq.queue_index = tfile->queue_index;
840         } else {
841                 /* Setup XDP RX-queue info, for new tfile getting attached */
842                 err = xdp_rxq_info_reg(&tfile->xdp_rxq,
843                                        tun->dev, tfile->queue_index);
844                 if (err < 0)
845                         goto out;
846                 err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
847                                                  MEM_TYPE_PAGE_SHARED, NULL);
848                 if (err < 0) {
849                         xdp_rxq_info_unreg(&tfile->xdp_rxq);
850                         goto out;
851                 }
852                 err = 0;
853         }
854
855         if (tfile->detached) {
856                 tun_enable_queue(tfile);
857         } else {
858                 sock_hold(&tfile->sk);
859                 tun_napi_init(tun, tfile, napi, napi_frags);
860         }
861
862         if (rtnl_dereference(tun->xdp_prog))
863                 sock_set_flag(&tfile->sk, SOCK_XDP);
864
865         /* device is allowed to go away first, so no need to hold extra
866          * refcnt.
867          */
868
869         /* Publish tfile->tun and tun->tfiles only after we've fully
870          * initialized tfile; otherwise we risk using half-initialized
871          * object.
872          */
873         rcu_assign_pointer(tfile->tun, tun);
874         rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
875         tun->numqueues++;
876         tun_set_real_num_queues(tun);
877 out:
878         return err;
879 }
880
881 static struct tun_struct *tun_get(struct tun_file *tfile)
882 {
883         struct tun_struct *tun;
884
885         rcu_read_lock();
886         tun = rcu_dereference(tfile->tun);
887         if (tun)
888                 dev_hold(tun->dev);
889         rcu_read_unlock();
890
891         return tun;
892 }
893
894 static void tun_put(struct tun_struct *tun)
895 {
896         dev_put(tun->dev);
897 }
898
899 /* TAP filtering */
900 static void addr_hash_set(u32 *mask, const u8 *addr)
901 {
902         int n = ether_crc(ETH_ALEN, addr) >> 26;
903         mask[n >> 5] |= (1 << (n & 31));
904 }
905
906 static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
907 {
908         int n = ether_crc(ETH_ALEN, addr) >> 26;
909         return mask[n >> 5] & (1 << (n & 31));
910 }
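
/* The two helpers above keep a 64-bit Bloom-style filter: ether_crc()
 * yields a 32-bit CRC, the top six bits (>> 26) index one of 64 bits,
 * and mask[n >> 5] with (1 << (n & 31)) select the word and bit. For
 * example, n == 37 sets bit 5 of mask[1]. False positives are possible
 * (two addresses may hash to the same bit); false negatives are not.
 */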
911
912 static int update_filter(struct tap_filter *filter, void __user *arg)
913 {
914         struct { u8 u[ETH_ALEN]; } *addr;
915         struct tun_filter uf;
916         int err, alen, n, nexact;
917
918         if (copy_from_user(&uf, arg, sizeof(uf)))
919                 return -EFAULT;
920
921         if (!uf.count) {
922                 /* Disabled */
923                 filter->count = 0;
924                 return 0;
925         }
926
927         alen = ETH_ALEN * uf.count;
928         addr = memdup_user(arg + sizeof(uf), alen);
929         if (IS_ERR(addr))
930                 return PTR_ERR(addr);
931
932         /* The filter is updated without holding any locks, which is
933          * perfectly safe: we disable it first, and in the worst
934          * case we'll accept a few undesired packets. */
935         filter->count = 0;
936         wmb();
937
938         /* Use first set of addresses as an exact filter */
939         for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
940                 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
941
942         nexact = n;
943
944         /* Remaining multicast addresses are hashed; a
945          * unicast address among them leaves the filter disabled. */
946         memset(filter->mask, 0, sizeof(filter->mask));
947         for (; n < uf.count; n++) {
948                 if (!is_multicast_ether_addr(addr[n].u)) {
949                         err = 0; /* no filter */
950                         goto free_addr;
951                 }
952                 addr_hash_set(filter->mask, addr[n].u);
953         }
954
955         /* For ALLMULTI just set the mask to all ones.
956          * This overrides the mask populated above. */
957         if ((uf.flags & TUN_FLT_ALLMULTI))
958                 memset(filter->mask, ~0, sizeof(filter->mask));
959
960         /* Now enable the filter */
961         wmb();
962         filter->count = nexact;
963
964         /* Return the number of exact filters */
965         err = nexact;
966 free_addr:
967         kfree(addr);
968         return err;
969 }
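
/* A hedged userspace sketch of programming this filter with the
 * TUNSETTXFILTER ioctl (fd is an open TAP descriptor; mac0/mac1 are
 * illustrative addresses; the flexible array after struct tun_filter
 * carries uf.count MAC addresses):
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = { .uf = { .flags = 0, .count = 2 } };
 *
 *	memcpy(req.addrs[0], mac0, ETH_ALEN);
 *	memcpy(req.addrs[1], mac1, ETH_ALEN);
 *	if (ioctl(fd, TUNSETTXFILTER, &req) < 0)
 *		perror("TUNSETTXFILTER");
 */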
970
971 /* Returns: 0 - drop, !=0 - accept */
972 static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
973 {
974         /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
975          * at this point. */
976         struct ethhdr *eh = (struct ethhdr *) skb->data;
977         int i;
978
979         /* Exact match */
980         for (i = 0; i < filter->count; i++)
981                 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
982                         return 1;
983
984         /* Inexact match (multicast only) */
985         if (is_multicast_ether_addr(eh->h_dest))
986                 return addr_hash_test(filter->mask, eh->h_dest);
987
988         return 0;
989 }
990
991 /*
992  * Checks whether the packet is accepted or not.
993  * Returns: 0 - drop, !=0 - accept
994  */
995 static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
996 {
997         if (!filter->count)
998                 return 1;
999
1000         return run_filter(filter, skb);
1001 }
1002
1003 /* Network device part of the driver */
1004
1005 static const struct ethtool_ops tun_ethtool_ops;
1006
1007 /* Net device detach from fd. */
1008 static void tun_net_uninit(struct net_device *dev)
1009 {
1010         tun_detach_all(dev);
1011 }
1012
1013 /* Net device open. */
1014 static int tun_net_open(struct net_device *dev)
1015 {
1016         netif_tx_start_all_queues(dev);
1017
1018         return 0;
1019 }
1020
1021 /* Net device close. */
1022 static int tun_net_close(struct net_device *dev)
1023 {
1024         netif_tx_stop_all_queues(dev);
1025         return 0;
1026 }
1027
1028 /* Flow-table bookkeeping for the xmit path */
1029 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1030 {
1031 #ifdef CONFIG_RPS
1032         if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1033                 /* Select queue was not called for the skbuff, so we extract the
1034                  * RPS hash and save it into the flow_table here.
1035                  */
1036                 struct tun_flow_entry *e;
1037                 __u32 rxhash;
1038
1039                 rxhash = __skb_get_hash_symmetric(skb);
1040                 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1041                 if (e)
1042                         tun_flow_save_rps_rxhash(e, rxhash);
1043         }
1044 #endif
1045 }
1046
1047 static unsigned int run_ebpf_filter(struct tun_struct *tun,
1048                                     struct sk_buff *skb,
1049                                     int len)
1050 {
1051         struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1052
1053         if (prog)
1054                 len = bpf_prog_run_clear_cb(prog->prog, skb);
1055
1056         return len;
1057 }
1058
1059 /* Net device start xmit */
1060 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1061 {
1062         struct tun_struct *tun = netdev_priv(dev);
1063         int txq = skb->queue_mapping;
1064         struct tun_file *tfile;
1065         int len = skb->len;
1066
1067         rcu_read_lock();
1068         tfile = rcu_dereference(tun->tfiles[txq]);
1069
1070         /* Drop packet if interface is not attached */
1071         if (!tfile)
1072                 goto drop;
1073
1074         if (!rcu_dereference(tun->steering_prog))
1075                 tun_automq_xmit(tun, skb);
1076
1077         tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
1078
1079         BUG_ON(!tfile);
1080
1081         /* Drop if the filter does not like it.
1082          * This is a noop if the filter is disabled.
1083          * Filter can be enabled only for the TAP devices. */
1084         if (!check_filter(&tun->txflt, skb))
1085                 goto drop;
1086
1087         if (tfile->socket.sk->sk_filter &&
1088             sk_filter(tfile->socket.sk, skb))
1089                 goto drop;
1090
1091         len = run_ebpf_filter(tun, skb, len);
1092         if (len == 0 || pskb_trim(skb, len))
1093                 goto drop;
1094
1095         if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1096                 goto drop;
1097
1098         skb_tx_timestamp(skb);
1099
1100         /* Orphan the skb - required as we might hang on to it
1101          * for an indefinite time.
1102          */
1103         skb_orphan(skb);
1104
1105         nf_reset(skb);
1106
1107         if (ptr_ring_produce(&tfile->tx_ring, skb))
1108                 goto drop;
1109
1110         /* Notify and wake up reader process */
1111         if (tfile->flags & TUN_FASYNC)
1112                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1113         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1114
1115         rcu_read_unlock();
1116         return NETDEV_TX_OK;
1117
1118 drop:
1119         this_cpu_inc(tun->pcpu_stats->tx_dropped);
1120         skb_tx_error(skb);
1121         kfree_skb(skb);
1122         rcu_read_unlock();
1123         return NET_XMIT_DROP;
1124 }
1125
1126 static void tun_net_mclist(struct net_device *dev)
1127 {
1128         /*
1129          * This callback is supposed to deal with mc filter in
1130          * _rx_ path and has nothing to do with the _tx_ path.
1131          * In rx path we always accept everything userspace gives us.
1132          */
1133 }
1134
1135 static netdev_features_t tun_net_fix_features(struct net_device *dev,
1136         netdev_features_t features)
1137 {
1138         struct tun_struct *tun = netdev_priv(dev);
1139
1140         return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1141 }
1142
1143 static void tun_set_headroom(struct net_device *dev, int new_hr)
1144 {
1145         struct tun_struct *tun = netdev_priv(dev);
1146
1147         if (new_hr < NET_SKB_PAD)
1148                 new_hr = NET_SKB_PAD;
1149
1150         tun->align = new_hr;
1151 }
1152
1153 static void
1154 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1155 {
1156         u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1157         struct tun_struct *tun = netdev_priv(dev);
1158         struct tun_pcpu_stats *p;
1159         int i;
1160
1161         for_each_possible_cpu(i) {
1162                 u64 rxpackets, rxbytes, txpackets, txbytes;
1163                 unsigned int start;
1164
1165                 p = per_cpu_ptr(tun->pcpu_stats, i);
1166                 do {
1167                         start = u64_stats_fetch_begin(&p->syncp);
1168                         rxpackets       = p->rx_packets;
1169                         rxbytes         = p->rx_bytes;
1170                         txpackets       = p->tx_packets;
1171                         txbytes         = p->tx_bytes;
1172                 } while (u64_stats_fetch_retry(&p->syncp, start));
1173
1174                 stats->rx_packets       += rxpackets;
1175                 stats->rx_bytes         += rxbytes;
1176                 stats->tx_packets       += txpackets;
1177                 stats->tx_bytes         += txbytes;
1178
1179                 /* u32 counters */
1180                 rx_dropped      += p->rx_dropped;
1181                 rx_frame_errors += p->rx_frame_errors;
1182                 tx_dropped      += p->tx_dropped;
1183         }
1184         stats->rx_dropped  = rx_dropped;
1185         stats->rx_frame_errors = rx_frame_errors;
1186         stats->tx_dropped = tx_dropped;
1187 }
1188
1189 static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1190                        struct netlink_ext_ack *extack)
1191 {
1192         struct tun_struct *tun = netdev_priv(dev);
1193         struct tun_file *tfile;
1194         struct bpf_prog *old_prog;
1195         int i;
1196
1197         old_prog = rtnl_dereference(tun->xdp_prog);
1198         rcu_assign_pointer(tun->xdp_prog, prog);
1199         if (old_prog)
1200                 bpf_prog_put(old_prog);
1201
1202         for (i = 0; i < tun->numqueues; i++) {
1203                 tfile = rtnl_dereference(tun->tfiles[i]);
1204                 if (prog)
1205                         sock_set_flag(&tfile->sk, SOCK_XDP);
1206                 else
1207                         sock_reset_flag(&tfile->sk, SOCK_XDP);
1208         }
1209         list_for_each_entry(tfile, &tun->disabled, next) {
1210                 if (prog)
1211                         sock_set_flag(&tfile->sk, SOCK_XDP);
1212                 else
1213                         sock_reset_flag(&tfile->sk, SOCK_XDP);
1214         }
1215
1216         return 0;
1217 }
1218
1219 static u32 tun_xdp_query(struct net_device *dev)
1220 {
1221         struct tun_struct *tun = netdev_priv(dev);
1222         const struct bpf_prog *xdp_prog;
1223
1224         xdp_prog = rtnl_dereference(tun->xdp_prog);
1225         if (xdp_prog)
1226                 return xdp_prog->aux->id;
1227
1228         return 0;
1229 }
1230
1231 static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1232 {
1233         switch (xdp->command) {
1234         case XDP_SETUP_PROG:
1235                 return tun_xdp_set(dev, xdp->prog, xdp->extack);
1236         case XDP_QUERY_PROG:
1237                 xdp->prog_id = tun_xdp_query(dev);
1238                 return 0;
1239         default:
1240                 return -EINVAL;
1241         }
1242 }
1243
1244 static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1245 {
1246         if (new_carrier) {
1247                 struct tun_struct *tun = netdev_priv(dev);
1248
1249                 if (!tun->numqueues)
1250                         return -EPERM;
1251
1252                 netif_carrier_on(dev);
1253         } else {
1254                 netif_carrier_off(dev);
1255         }
1256         return 0;
1257 }
1258
1259 static const struct net_device_ops tun_netdev_ops = {
1260         .ndo_uninit             = tun_net_uninit,
1261         .ndo_open               = tun_net_open,
1262         .ndo_stop               = tun_net_close,
1263         .ndo_start_xmit         = tun_net_xmit,
1264         .ndo_fix_features       = tun_net_fix_features,
1265         .ndo_select_queue       = tun_select_queue,
1266         .ndo_set_rx_headroom    = tun_set_headroom,
1267         .ndo_get_stats64        = tun_net_get_stats64,
1268         .ndo_change_carrier     = tun_net_change_carrier,
1269 };
1270
1271 static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1272 {
1273         /* Notify and wake up reader process */
1274         if (tfile->flags & TUN_FASYNC)
1275                 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1276         tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1277 }
1278
1279 static int tun_xdp_xmit(struct net_device *dev, int n,
1280                         struct xdp_frame **frames, u32 flags)
1281 {
1282         struct tun_struct *tun = netdev_priv(dev);
1283         struct tun_file *tfile;
1284         u32 numqueues;
1285         int drops = 0;
1286         int cnt = n;
1287         int i;
1288
1289         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1290                 return -EINVAL;
1291
1292         rcu_read_lock();
1293
1294 resample:
1295         numqueues = READ_ONCE(tun->numqueues);
1296         if (!numqueues) {
1297                 rcu_read_unlock();
1298                 return -ENXIO; /* Caller will free/return all frames */
1299         }
1300
1301         tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1302                                             numqueues]);
1303         if (unlikely(!tfile))
1304                 goto resample;
1305
1306         spin_lock(&tfile->tx_ring.producer_lock);
1307         for (i = 0; i < n; i++) {
1308                 struct xdp_frame *xdp = frames[i];
1309                 /* Encode the XDP flag into the lowest bit so the consumer
1310                  * can distinguish an XDP frame from an sk_buff.
1311                  */
1312                 void *frame = tun_xdp_to_ptr(xdp);
1313
1314                 if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1315                         this_cpu_inc(tun->pcpu_stats->tx_dropped);
1316                         xdp_return_frame_rx_napi(xdp);
1317                         drops++;
1318                 }
1319         }
1320         spin_unlock(&tfile->tx_ring.producer_lock);
1321
1322         if (flags & XDP_XMIT_FLUSH)
1323                 __tun_xdp_flush_tfile(tfile);
1324
1325         rcu_read_unlock();
1326         return cnt - drops;
1327 }
1328
1329 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1330 {
1331         struct xdp_frame *frame = convert_to_xdp_frame(xdp);
1332
1333         if (unlikely(!frame))
1334                 return -EOVERFLOW;
1335
1336         return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1337 }
1338
1339 static const struct net_device_ops tap_netdev_ops = {
1340         .ndo_uninit             = tun_net_uninit,
1341         .ndo_open               = tun_net_open,
1342         .ndo_stop               = tun_net_close,
1343         .ndo_start_xmit         = tun_net_xmit,
1344         .ndo_fix_features       = tun_net_fix_features,
1345         .ndo_set_rx_mode        = tun_net_mclist,
1346         .ndo_set_mac_address    = eth_mac_addr,
1347         .ndo_validate_addr      = eth_validate_addr,
1348         .ndo_select_queue       = tun_select_queue,
1349         .ndo_features_check     = passthru_features_check,
1350         .ndo_set_rx_headroom    = tun_set_headroom,
1351         .ndo_get_stats64        = tun_net_get_stats64,
1352         .ndo_bpf                = tun_xdp,
1353         .ndo_xdp_xmit           = tun_xdp_xmit,
1354         .ndo_change_carrier     = tun_net_change_carrier,
1355 };
1356
1357 static void tun_flow_init(struct tun_struct *tun)
1358 {
1359         int i;
1360
1361         for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1362                 INIT_HLIST_HEAD(&tun->flows[i]);
1363
1364         tun->ageing_time = TUN_FLOW_EXPIRE;
1365         timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1366         mod_timer(&tun->flow_gc_timer,
1367                   round_jiffies_up(jiffies + tun->ageing_time));
1368 }
1369
1370 static void tun_flow_uninit(struct tun_struct *tun)
1371 {
1372         del_timer_sync(&tun->flow_gc_timer);
1373         tun_flow_flush(tun);
1374 }
1375
1376 #define MIN_MTU 68
1377 #define MAX_MTU 65535
1378
1379 /* Initialize net device. */
1380 static void tun_net_init(struct net_device *dev)
1381 {
1382         struct tun_struct *tun = netdev_priv(dev);
1383
1384         switch (tun->flags & TUN_TYPE_MASK) {
1385         case IFF_TUN:
1386                 dev->netdev_ops = &tun_netdev_ops;
1387
1388                 /* Point-to-Point TUN Device */
1389                 dev->hard_header_len = 0;
1390                 dev->addr_len = 0;
1391                 dev->mtu = 1500;
1392
1393                 /* Zero header length */
1394                 dev->type = ARPHRD_NONE;
1395                 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1396                 break;
1397
1398         case IFF_TAP:
1399                 dev->netdev_ops = &tap_netdev_ops;
1400                 /* Ethernet TAP Device */
1401                 ether_setup(dev);
1402                 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1403                 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1404
1405                 eth_hw_addr_random(dev);
1406
1407                 break;
1408         }
1409
1410         dev->min_mtu = MIN_MTU;
1411         dev->max_mtu = MAX_MTU - dev->hard_header_len;
1412 }
1413
1414 static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1415 {
1416         struct sock *sk = tfile->socket.sk;
1417
1418         return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1419 }
1420
1421 /* Character device part */
1422
1423 /* Poll */
1424 static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1425 {
1426         struct tun_file *tfile = file->private_data;
1427         struct tun_struct *tun = tun_get(tfile);
1428         struct sock *sk;
1429         __poll_t mask = 0;
1430
1431         if (!tun)
1432                 return EPOLLERR;
1433
1434         sk = tfile->socket.sk;
1435
1436         tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1437
1438         poll_wait(file, sk_sleep(sk), wait);
1439
1440         if (!ptr_ring_empty(&tfile->tx_ring))
1441                 mask |= EPOLLIN | EPOLLRDNORM;
1442
1443         /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1444          * guarantee that EPOLLOUT is raised either here or by
1445          * tun_sock_write_space(). Then the process can still get a
1446          * notification after it writes to a down device and meets -EIO.
1447          */
1448         if (tun_sock_writeable(tun, tfile) ||
1449             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1450              tun_sock_writeable(tun, tfile)))
1451                 mask |= EPOLLOUT | EPOLLWRNORM;
1452
1453         if (tun->dev->reg_state != NETREG_REGISTERED)
1454                 mask = EPOLLERR;
1455
1456         tun_put(tun);
1457         return mask;
1458 }
1459
1460 static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1461                                             size_t len,
1462                                             const struct iov_iter *it)
1463 {
1464         struct sk_buff *skb;
1465         size_t linear;
1466         int err;
1467         int i;
1468
1469         if (it->nr_segs > MAX_SKB_FRAGS + 1)
1470                 return ERR_PTR(-ENOMEM);
1471
1472         local_bh_disable();
1473         skb = napi_get_frags(&tfile->napi);
1474         local_bh_enable();
1475         if (!skb)
1476                 return ERR_PTR(-ENOMEM);
1477
1478         linear = iov_iter_single_seg_count(it);
1479         err = __skb_grow(skb, linear);
1480         if (err)
1481                 goto free;
1482
1483         skb->len = len;
1484         skb->data_len = len - linear;
1485         skb->truesize += skb->data_len;
1486
1487         for (i = 1; i < it->nr_segs; i++) {
1488                 size_t fragsz = it->iov[i].iov_len;
1489                 struct page *page;
1490                 void *frag;
1491
1492                 if (fragsz == 0 || fragsz > PAGE_SIZE) {
1493                         err = -EINVAL;
1494                         goto free;
1495                 }
1496                 frag = netdev_alloc_frag(fragsz);
1497                 if (!frag) {
1498                         err = -ENOMEM;
1499                         goto free;
1500                 }
1501                 page = virt_to_head_page(frag);
1502                 skb_fill_page_desc(skb, i - 1, page,
1503                                    frag - page_address(page), fragsz);
1504         }
1505
1506         return skb;
1507 free:
1508         /* frees skb and all frags allocated with napi_alloc_frag() */
1509         napi_free_frags(&tfile->napi);
1510         return ERR_PTR(err);
1511 }
1512
1513 /* prepad is the amount to reserve at front.  len is length after that.
1514  * linear is a hint as to how much to copy (usually headers). */
1515 static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1516                                      size_t prepad, size_t len,
1517                                      size_t linear, int noblock)
1518 {
1519         struct sock *sk = tfile->socket.sk;
1520         struct sk_buff *skb;
1521         int err;
1522
1523         /* Under a page?  Don't bother with paged skb. */
1524         if (prepad + len < PAGE_SIZE || !linear)
1525                 linear = len;
1526
1527         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1528                                    &err, 0);
1529         if (!skb)
1530                 return ERR_PTR(err);
1531
1532         skb_reserve(skb, prepad);
1533         skb_put(skb, linear);
1534         skb->data_len = len - linear;
1535         skb->len += len - linear;
1536
1537         return skb;
1538 }
1539
1540 static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1541                            struct sk_buff *skb, int more)
1542 {
1543         struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1544         struct sk_buff_head process_queue;
1545         u32 rx_batched = tun->rx_batched;
1546         bool rcv = false;
1547
1548         if (!rx_batched || (!more && skb_queue_empty(queue))) {
1549                 local_bh_disable();
1550                 skb_record_rx_queue(skb, tfile->queue_index);
1551                 netif_receive_skb(skb);
1552                 local_bh_enable();
1553                 return;
1554         }
1555
1556         spin_lock(&queue->lock);
1557         if (!more || skb_queue_len(queue) == rx_batched) {
1558                 __skb_queue_head_init(&process_queue);
1559                 skb_queue_splice_tail_init(queue, &process_queue);
1560                 rcv = true;
1561         } else {
1562                 __skb_queue_tail(queue, skb);
1563         }
1564         spin_unlock(&queue->lock);
1565
1566         if (rcv) {
1567                 struct sk_buff *nskb;
1568
1569                 local_bh_disable();
1570                 while ((nskb = __skb_dequeue(&process_queue))) {
1571                         skb_record_rx_queue(nskb, tfile->queue_index);
1572                         netif_receive_skb(nskb);
1573                 }
1574                 skb_record_rx_queue(skb, tfile->queue_index);
1575                 netif_receive_skb(skb);
1576                 local_bh_enable();
1577         }
1578 }
1579
1580 static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1581                               int len, int noblock, bool zerocopy)
1582 {
1583         if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1584                 return false;
1585
1586         if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1587                 return false;
1588
1589         if (!noblock)
1590                 return false;
1591
1592         if (zerocopy)
1593                 return false;
1594
1595         if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
1596             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1597                 return false;
1598
1599         return true;
1600 }
1601
1602 static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
1603                                        int buflen, int len, int pad)
1604 {
1605         struct sk_buff *skb = build_skb(buf, buflen);
1606
1607         if (!skb)
1608                 return ERR_PTR(-ENOMEM);
1609
1610         skb_reserve(skb, pad);
1611         skb_put(skb, len);
1612
1613         get_page(alloc_frag->page);
1614         alloc_frag->offset += buflen;
1615
1616         return skb;
1617 }
1618
1619 static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1620                        struct xdp_buff *xdp, u32 act)
1621 {
1622         int err;
1623
1624         switch (act) {
1625         case XDP_REDIRECT:
1626                 err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1627                 if (err)
1628                         return err;
1629                 break;
1630         case XDP_TX:
1631                 err = tun_xdp_tx(tun->dev, xdp);
1632                 if (err < 0)
1633                         return err;
1634                 break;
1635         case XDP_PASS:
1636                 break;
1637         default:
1638                 bpf_warn_invalid_xdp_action(act);
1639                 /* fall through */
1640         case XDP_ABORTED:
1641                 trace_xdp_exception(tun->dev, xdp_prog, act);
1642                 /* fall through */
1643         case XDP_DROP:
1644                 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1645                 break;
1646         }
1647
1648         return act;
1649 }
1650
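     /* Build an skb directly from the process's page frag, copying the
      * payload once and, if an XDP program is attached, running it on
      * the raw buffer before the skb is created.
      */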
1651 static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1652                                      struct tun_file *tfile,
1653                                      struct iov_iter *from,
1654                                      struct virtio_net_hdr *hdr,
1655                                      int len, int *skb_xdp)
1656 {
1657         struct page_frag *alloc_frag = &current->task_frag;
1658         struct bpf_prog *xdp_prog;
1659         int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1660         char *buf;
1661         size_t copied;
1662         int pad = TUN_RX_PAD;
1663         int err = 0;
1664
1665         rcu_read_lock();
1666         xdp_prog = rcu_dereference(tun->xdp_prog);
1667         if (xdp_prog)
1668                 pad += XDP_PACKET_HEADROOM;
1669         buflen += SKB_DATA_ALIGN(len + pad);
1670         rcu_read_unlock();
1671
1672         alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1673         if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1674                 return ERR_PTR(-ENOMEM);
1675
1676         buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1677         copied = copy_page_from_iter(alloc_frag->page,
1678                                      alloc_frag->offset + pad,
1679                                      len, from);
1680         if (copied != len)
1681                 return ERR_PTR(-EFAULT);
1682
1683         /* There is a small window where XDP may be attached after the
1684          * check of xdp_prog above; this should be rare, so for simplicity
1685          * we fall back to XDP on the skb when the headroom is not enough.
1686          */
1687         if (hdr->gso_type || !xdp_prog) {
1688                 *skb_xdp = 1;
1689                 return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
1690         }
1691
1692         *skb_xdp = 0;
1693
1694         local_bh_disable();
1695         rcu_read_lock();
1696         xdp_prog = rcu_dereference(tun->xdp_prog);
1697         if (xdp_prog) {
1698                 struct xdp_buff xdp;
1699                 u32 act;
1700
1701                 xdp.data_hard_start = buf;
1702                 xdp.data = buf + pad;
1703                 xdp_set_data_meta_invalid(&xdp);
1704                 xdp.data_end = xdp.data + len;
1705                 xdp.rxq = &tfile->xdp_rxq;
1706
1707                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1708                 if (act == XDP_REDIRECT || act == XDP_TX) {
1709                         get_page(alloc_frag->page);
1710                         alloc_frag->offset += buflen;
1711                 }
1712                 err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1713                 if (err < 0)
1714                         goto err_xdp;
1715                 if (err == XDP_REDIRECT)
1716                         xdp_do_flush_map();
1717                 if (err != XDP_PASS)
1718                         goto out;
1719
1720                 pad = xdp.data - xdp.data_hard_start;
1721                 len = xdp.data_end - xdp.data;
1722         }
1723         rcu_read_unlock();
1724         local_bh_enable();
1725
1726         return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
1727
1728 err_xdp:
1729         put_page(alloc_frag->page);
1730 out:
1731         rcu_read_unlock();
1732         local_bh_enable();
1733         return NULL;
1734 }
1735
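     /* Layout of a frame written to the tun fd (each header is present
      * only when the matching flag is set; a summary of the parsing
      * below):
      *
      *   struct tun_pi          - unless IFF_NO_PI
      *   struct virtio_net_hdr  - if IFF_VNET_HDR, padded to vnet_hdr_sz
      *   packet payload
      */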
1736 /* Get packet from user space buffer */
1737 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1738                             void *msg_control, struct iov_iter *from,
1739                             int noblock, bool more)
1740 {
1741         struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1742         struct sk_buff *skb;
1743         size_t total_len = iov_iter_count(from);
1744         size_t len = total_len, align = tun->align, linear;
1745         struct virtio_net_hdr gso = { 0 };
1746         struct tun_pcpu_stats *stats;
1747         int good_linear;
1748         int copylen;
1749         bool zerocopy = false;
1750         int err;
1751         u32 rxhash = 0;
1752         int skb_xdp = 1;
1753         bool frags = tun_napi_frags_enabled(tfile);
1754
1755         if (!(tun->flags & IFF_NO_PI)) {
1756                 if (len < sizeof(pi))
1757                         return -EINVAL;
1758                 len -= sizeof(pi);
1759
1760                 if (!copy_from_iter_full(&pi, sizeof(pi), from))
1761                         return -EFAULT;
1762         }
1763
1764         if (tun->flags & IFF_VNET_HDR) {
1765                 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1766
1767                 if (len < vnet_hdr_sz)
1768                         return -EINVAL;
1769                 len -= vnet_hdr_sz;
1770
1771                 if (!copy_from_iter_full(&gso, sizeof(gso), from))
1772                         return -EFAULT;
1773
1774                 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1775                     tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1776                         gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1777
1778                 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1779                         return -EINVAL;
1780                 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1781         }
1782
1783         if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1784                 align += NET_IP_ALIGN;
1785                 if (unlikely(len < ETH_HLEN ||
1786                              (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1787                         return -EINVAL;
1788         }
1789
1790         good_linear = SKB_MAX_HEAD(align);
1791
1792         if (msg_control) {
1793                 struct iov_iter i = *from;
1794
1795                 /* A short head (GOODCOPY_LEN by default) is copied into the
1796                  * skb, leaving enough room to expand the skb head if needed.
1797                  * The rest of the buffer is mapped from userspace.
1798                  */
1799                 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1800                 if (copylen > good_linear)
1801                         copylen = good_linear;
1802                 linear = copylen;
1803                 iov_iter_advance(&i, copylen);
1804                 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1805                         zerocopy = true;
1806         }
1807
1808         if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1809                 /* Packets that are not easy to process here (e.g. GSO or
1810                  * jumbo packets) are handled with the generic XDP routine
1811                  * after the skb has been created.
1812                  */
1813                 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1814                 if (IS_ERR(skb)) {
1815                         this_cpu_inc(tun->pcpu_stats->rx_dropped);
1816                         return PTR_ERR(skb);
1817                 }
1818                 if (!skb)
1819                         return total_len;
1820         } else {
1821                 if (!zerocopy) {
1822                         copylen = len;
1823                         if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1824                                 linear = good_linear;
1825                         else
1826                                 linear = tun16_to_cpu(tun, gso.hdr_len);
1827                 }
1828
1829                 if (frags) {
1830                         mutex_lock(&tfile->napi_mutex);
1831                         skb = tun_napi_alloc_frags(tfile, copylen, from);
1832                         /* tun_napi_alloc_frags() enforces a layout for the skb.
1833                          * If zerocopy is enabled, then this layout will be
1834                          * overwritten by zerocopy_sg_from_iter().
1835                          */
1836                         zerocopy = false;
1837                 } else {
1838                         skb = tun_alloc_skb(tfile, align, copylen, linear,
1839                                             noblock);
1840                 }
1841
1842                 if (IS_ERR(skb)) {
1843                         if (PTR_ERR(skb) != -EAGAIN)
1844                                 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1845                         if (frags)
1846                                 mutex_unlock(&tfile->napi_mutex);
1847                         return PTR_ERR(skb);
1848                 }
1849
1850                 if (zerocopy)
1851                         err = zerocopy_sg_from_iter(skb, from);
1852                 else
1853                         err = skb_copy_datagram_from_iter(skb, 0, from, len);
1854
1855                 if (err) {
1856                         err = -EFAULT;
1857 drop:
1858                         this_cpu_inc(tun->pcpu_stats->rx_dropped);
1859                         kfree_skb(skb);
1860                         if (frags) {
1861                                 tfile->napi.skb = NULL;
1862                                 mutex_unlock(&tfile->napi_mutex);
1863                         }
1864
1865                         return err;
1866                 }
1867         }
1868
1869         if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1870                 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1871                 kfree_skb(skb);
1872                 if (frags) {
1873                         tfile->napi.skb = NULL;
1874                         mutex_unlock(&tfile->napi_mutex);
1875                 }
1876
1877                 return -EINVAL;
1878         }
1879
1880         switch (tun->flags & TUN_TYPE_MASK) {
1881         case IFF_TUN:
1882                 if (tun->flags & IFF_NO_PI) {
1883                         u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1884
1885                         switch (ip_version) {
1886                         case 4:
1887                                 pi.proto = htons(ETH_P_IP);
1888                                 break;
1889                         case 6:
1890                                 pi.proto = htons(ETH_P_IPV6);
1891                                 break;
1892                         default:
1893                                 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1894                                 kfree_skb(skb);
1895                                 return -EINVAL;
1896                         }
1897                 }
1898
1899                 skb_reset_mac_header(skb);
1900                 skb->protocol = pi.proto;
1901                 skb->dev = tun->dev;
1902                 break;
1903         case IFF_TAP:
1904                 if (!frags)
1905                         skb->protocol = eth_type_trans(skb, tun->dev);
1906                 break;
1907         }
1908
1909         /* copy skb_ubuf_info for callback when skb has no error */
1910         if (zerocopy) {
1911                 skb_shinfo(skb)->destructor_arg = msg_control;
1912                 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1913                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1914         } else if (msg_control) {
1915                 struct ubuf_info *uarg = msg_control;
1916                 uarg->callback(uarg, false);
1917         }
1918
1919         skb_reset_network_header(skb);
1920         skb_probe_transport_header(skb);
1921
1922         if (skb_xdp) {
1923                 struct bpf_prog *xdp_prog;
1924                 int ret;
1925
1926                 local_bh_disable();
1927                 rcu_read_lock();
1928                 xdp_prog = rcu_dereference(tun->xdp_prog);
1929                 if (xdp_prog) {
1930                         ret = do_xdp_generic(xdp_prog, skb);
1931                         if (ret != XDP_PASS) {
1932                                 rcu_read_unlock();
1933                                 local_bh_enable();
1934                                 return total_len;
1935                         }
1936                 }
1937                 rcu_read_unlock();
1938                 local_bh_enable();
1939         }
1940
1941         /* Compute the costly rx hash only if needed for flow updates.
1942          * There is a small chance of out-of-order delivery while switching
1943          * steering programs, but it is not worth optimizing for.
1944          */
1945         if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1946             !tfile->detached)
1947                 rxhash = __skb_get_hash_symmetric(skb);
1948
1949         rcu_read_lock();
1950         if (unlikely(!(tun->dev->flags & IFF_UP))) {
1951                 err = -EIO;
1952                 rcu_read_unlock();
1953                 goto drop;
1954         }
1955
1956         if (frags) {
1957                 /* Exercise flow dissector code path. */
1958                 u32 headlen = eth_get_headlen(tun->dev, skb->data,
1959                                               skb_headlen(skb));
1960
1961                 if (unlikely(headlen > skb_headlen(skb))) {
1962                         this_cpu_inc(tun->pcpu_stats->rx_dropped);
1963                         napi_free_frags(&tfile->napi);
1964                         rcu_read_unlock();
1965                         mutex_unlock(&tfile->napi_mutex);
1966                         WARN_ON(1);
1967                         return -ENOMEM;
1968                 }
1969
1970                 local_bh_disable();
1971                 napi_gro_frags(&tfile->napi);
1972                 local_bh_enable();
1973                 mutex_unlock(&tfile->napi_mutex);
1974         } else if (tfile->napi_enabled) {
1975                 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1976                 int queue_len;
1977
1978                 spin_lock_bh(&queue->lock);
1979                 __skb_queue_tail(queue, skb);
1980                 queue_len = skb_queue_len(queue);
1981                 spin_unlock(&queue->lock);
1982
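                     /* BH is still disabled here: the spin_lock_bh() above
                      * is paired with a plain spin_unlock(), so
                      * napi_schedule() runs with BH off until the
                      * local_bh_enable() below.
                      */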
1983                 if (!more || queue_len > NAPI_POLL_WEIGHT)
1984                         napi_schedule(&tfile->napi);
1985
1986                 local_bh_enable();
1987         } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
1988                 tun_rx_batched(tun, tfile, skb, more);
1989         } else {
1990                 netif_rx_ni(skb);
1991         }
1992         rcu_read_unlock();
1993
1994         stats = get_cpu_ptr(tun->pcpu_stats);
1995         u64_stats_update_begin(&stats->syncp);
1996         stats->rx_packets++;
1997         stats->rx_bytes += len;
1998         u64_stats_update_end(&stats->syncp);
1999         put_cpu_ptr(stats);
2000
2001         if (rxhash)
2002                 tun_flow_update(tun, rxhash, tfile);
2003
2004         return total_len;
2005 }
2006
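     /* write(2) on the tun fd lands here; each write() call hands
      * exactly one frame to tun_get_user().
      */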
2007 static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2008 {
2009         struct file *file = iocb->ki_filp;
2010         struct tun_file *tfile = file->private_data;
2011         struct tun_struct *tun = tun_get(tfile);
2012         ssize_t result;
2013
2014         if (!tun)
2015                 return -EBADFD;
2016
2017         result = tun_get_user(tun, tfile, NULL, from,
2018                               file->f_flags & O_NONBLOCK, false);
2019
2020         tun_put(tun);
2021         return result;
2022 }
2023
2024 static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2025                                 struct tun_file *tfile,
2026                                 struct xdp_frame *xdp_frame,
2027                                 struct iov_iter *iter)
2028 {
2029         int vnet_hdr_sz = 0;
2030         size_t size = xdp_frame->len;
2031         struct tun_pcpu_stats *stats;
2032         size_t ret;
2033
2034         if (tun->flags & IFF_VNET_HDR) {
2035                 struct virtio_net_hdr gso = { 0 };
2036
2037                 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2038                 if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2039                         return -EINVAL;
2040                 if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2041                              sizeof(gso)))
2042                         return -EFAULT;
2043                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2044         }
2045
2046         ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2047
2048         stats = get_cpu_ptr(tun->pcpu_stats);
2049         u64_stats_update_begin(&stats->syncp);
2050         stats->tx_packets++;
2051         stats->tx_bytes += ret;
2052         u64_stats_update_end(&stats->syncp);
2053         put_cpu_ptr(tun->pcpu_stats);
2054
2055         return ret;
2056 }
2057
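     /* Frames read from the tun fd mirror the write layout: an optional
      * struct tun_pi (with TUN_PKT_STRIP set when the buffer is too
      * small), an optional virtio_net_hdr, then the packet bytes.
      */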
2058 /* Put packet to the user space buffer */
2059 static ssize_t tun_put_user(struct tun_struct *tun,
2060                             struct tun_file *tfile,
2061                             struct sk_buff *skb,
2062                             struct iov_iter *iter)
2063 {
2064         struct tun_pi pi = { 0, skb->protocol };
2065         struct tun_pcpu_stats *stats;
2066         ssize_t total;
2067         int vlan_offset = 0;
2068         int vlan_hlen = 0;
2069         int vnet_hdr_sz = 0;
2070
2071         if (skb_vlan_tag_present(skb))
2072                 vlan_hlen = VLAN_HLEN;
2073
2074         if (tun->flags & IFF_VNET_HDR)
2075                 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2076
2077         total = skb->len + vlan_hlen + vnet_hdr_sz;
2078
2079         if (!(tun->flags & IFF_NO_PI)) {
2080                 if (iov_iter_count(iter) < sizeof(pi))
2081                         return -EINVAL;
2082
2083                 total += sizeof(pi);
2084                 if (iov_iter_count(iter) < total) {
2085                         /* Packet will be stripped (truncated to fit) */
2086                         pi.flags |= TUN_PKT_STRIP;
2087                 }
2088
2089                 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2090                         return -EFAULT;
2091         }
2092
2093         if (vnet_hdr_sz) {
2094                 struct virtio_net_hdr gso;
2095
2096                 if (iov_iter_count(iter) < vnet_hdr_sz)
2097                         return -EINVAL;
2098
2099                 if (virtio_net_hdr_from_skb(skb, &gso,
2100                                             tun_is_little_endian(tun), true,
2101                                             vlan_hlen)) {
2102                         struct skb_shared_info *sinfo = skb_shinfo(skb);
2103                         pr_err("unexpected GSO type: "
2104                                "0x%x, gso_size %d, hdr_len %d\n",
2105                                sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2106                                tun16_to_cpu(tun, gso.hdr_len));
2107                         print_hex_dump(KERN_ERR, "tun: ",
2108                                        DUMP_PREFIX_NONE,
2109                                        16, 1, skb->head,
2110                                        min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2111                         WARN_ON_ONCE(1);
2112                         return -EINVAL;
2113                 }
2114
2115                 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2116                         return -EFAULT;
2117
2118                 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2119         }
2120
2121         if (vlan_hlen) {
2122                 int ret;
2123                 struct veth veth;
2124
2125                 veth.h_vlan_proto = skb->vlan_proto;
2126                 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2127
2128                 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2129
2130                 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2131                 if (ret || !iov_iter_count(iter))
2132                         goto done;
2133
2134                 ret = copy_to_iter(&veth, sizeof(veth), iter);
2135                 if (ret != sizeof(veth) || !iov_iter_count(iter))
2136                         goto done;
2137         }
2138
2139         skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2140
2141 done:
2142         /* caller is in process context */
2143         stats = get_cpu_ptr(tun->pcpu_stats);
2144         u64_stats_update_begin(&stats->syncp);
2145         stats->tx_packets++;
2146         stats->tx_bytes += skb->len + vlan_hlen;
2147         u64_stats_update_end(&stats->syncp);
2148         put_cpu_ptr(tun->pcpu_stats);
2149
2150         return total;
2151 }
2152
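     /* Consume one pointer (skb or xdp_frame) from the per-queue
      * tx_ring, sleeping interruptibly unless noblock is set.
      */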
2153 static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2154 {
2155         DECLARE_WAITQUEUE(wait, current);
2156         void *ptr = NULL;
2157         int error = 0;
2158
2159         ptr = ptr_ring_consume(&tfile->tx_ring);
2160         if (ptr)
2161                 goto out;
2162         if (noblock) {
2163                 error = -EAGAIN;
2164                 goto out;
2165         }
2166
2167         add_wait_queue(&tfile->socket.wq.wait, &wait);
2168
2169         while (1) {
2170                 set_current_state(TASK_INTERRUPTIBLE);
2171                 ptr = ptr_ring_consume(&tfile->tx_ring);
2172                 if (ptr)
2173                         break;
2174                 if (signal_pending(current)) {
2175                         error = -ERESTARTSYS;
2176                         break;
2177                 }
2178                 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2179                         error = -EFAULT;
2180                         break;
2181                 }
2182
2183                 schedule();
2184         }
2185
2186         __set_current_state(TASK_RUNNING);
2187         remove_wait_queue(&tfile->socket.wq.wait, &wait);
2188
2189 out:
2190         *err = error;
2191         return ptr;
2192 }
2193
2194 static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2195                            struct iov_iter *to,
2196                            int noblock, void *ptr)
2197 {
2198         ssize_t ret;
2199         int err;
2200
2201         tun_debug(KERN_INFO, tun, "tun_do_read\n");
2202
2203         if (!iov_iter_count(to)) {
2204                 tun_ptr_free(ptr);
2205                 return 0;
2206         }
2207
2208         if (!ptr) {
2209                 /* Read frames from ring */
2210                 ptr = tun_ring_recv(tfile, noblock, &err);
2211                 if (!ptr)
2212                         return err;
2213         }
2214
2215         if (tun_is_xdp_frame(ptr)) {
2216                 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2217
2218                 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2219                 xdp_return_frame(xdpf);
2220         } else {
2221                 struct sk_buff *skb = ptr;
2222
2223                 ret = tun_put_user(tun, tfile, skb, to);
2224                 if (unlikely(ret < 0))
2225                         kfree_skb(skb);
2226                 else
2227                         consume_skb(skb);
2228         }
2229
2230         return ret;
2231 }
2232
2233 static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2234 {
2235         struct file *file = iocb->ki_filp;
2236         struct tun_file *tfile = file->private_data;
2237         struct tun_struct *tun = tun_get(tfile);
2238         ssize_t len = iov_iter_count(to), ret;
2239
2240         if (!tun)
2241                 return -EBADFD;
2242         ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
2243         ret = min_t(ssize_t, ret, len);
2244         if (ret > 0)
2245                 iocb->ki_pos = ret;
2246         tun_put(tun);
2247         return ret;
2248 }
2249
2250 static void tun_prog_free(struct rcu_head *rcu)
2251 {
2252         struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2253
2254         bpf_prog_destroy(prog->prog);
2255         kfree(prog);
2256 }
2257
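     /* Publish a new steering/filter program under tun->lock and free
      * the old one only after an RCU grace period, so readers under
      * rcu_read_lock() never see a program being destroyed.
      */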
2258 static int __tun_set_ebpf(struct tun_struct *tun,
2259                           struct tun_prog __rcu **prog_p,
2260                           struct bpf_prog *prog)
2261 {
2262         struct tun_prog *old, *new = NULL;
2263
2264         if (prog) {
2265                 new = kmalloc(sizeof(*new), GFP_KERNEL);
2266                 if (!new)
2267                         return -ENOMEM;
2268                 new->prog = prog;
2269         }
2270
2271         spin_lock_bh(&tun->lock);
2272         old = rcu_dereference_protected(*prog_p,
2273                                         lockdep_is_held(&tun->lock));
2274         rcu_assign_pointer(*prog_p, new);
2275         spin_unlock_bh(&tun->lock);
2276
2277         if (old)
2278                 call_rcu(&old->rcu, tun_prog_free);
2279
2280         return 0;
2281 }
2282
2283 static void tun_free_netdev(struct net_device *dev)
2284 {
2285         struct tun_struct *tun = netdev_priv(dev);
2286
2287         BUG_ON(!list_empty(&tun->disabled));
2288         free_percpu(tun->pcpu_stats);
2289         tun_flow_uninit(tun);
2290         security_tun_dev_free_security(tun->security);
2291         __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2292         __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2293 }
2294
2295 static void tun_setup(struct net_device *dev)
2296 {
2297         struct tun_struct *tun = netdev_priv(dev);
2298
2299         tun->owner = INVALID_UID;
2300         tun->group = INVALID_GID;
2301         tun_default_link_ksettings(dev, &tun->link_ksettings);
2302
2303         dev->ethtool_ops = &tun_ethtool_ops;
2304         dev->needs_free_netdev = true;
2305         dev->priv_destructor = tun_free_netdev;
2306         /* We prefer our own queue length */
2307         dev->tx_queue_len = TUN_READQ_SIZE;
2308 }
2309
2310 /* Trivial set of netlink ops to allow deleting a tun or tap
2311  * device via netlink.
2312  */
2313 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2314                         struct netlink_ext_ack *extack)
2315 {
2316         NL_SET_ERR_MSG(extack,
2317                        "tun/tap creation via rtnetlink is not supported.");
2318         return -EOPNOTSUPP;
2319 }
2320
2321 static size_t tun_get_size(const struct net_device *dev)
2322 {
2323         BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2324         BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2325
2326         return nla_total_size(sizeof(uid_t)) + /* OWNER */
2327                nla_total_size(sizeof(gid_t)) + /* GROUP */
2328                nla_total_size(sizeof(u8)) + /* TYPE */
2329                nla_total_size(sizeof(u8)) + /* PI */
2330                nla_total_size(sizeof(u8)) + /* VNET_HDR */
2331                nla_total_size(sizeof(u8)) + /* PERSIST */
2332                nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2333                nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2334                nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2335                0;
2336 }
2337
2338 static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2339 {
2340         struct tun_struct *tun = netdev_priv(dev);
2341
2342         if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2343                 goto nla_put_failure;
2344         if (uid_valid(tun->owner) &&
2345             nla_put_u32(skb, IFLA_TUN_OWNER,
2346                         from_kuid_munged(current_user_ns(), tun->owner)))
2347                 goto nla_put_failure;
2348         if (gid_valid(tun->group) &&
2349             nla_put_u32(skb, IFLA_TUN_GROUP,
2350                         from_kgid_munged(current_user_ns(), tun->group)))
2351                 goto nla_put_failure;
2352         if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2353                 goto nla_put_failure;
2354         if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2355                 goto nla_put_failure;
2356         if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2357                 goto nla_put_failure;
2358         if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2359                        !!(tun->flags & IFF_MULTI_QUEUE)))
2360                 goto nla_put_failure;
2361         if (tun->flags & IFF_MULTI_QUEUE) {
2362                 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2363                         goto nla_put_failure;
2364                 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2365                                 tun->numdisabled))
2366                         goto nla_put_failure;
2367         }
2368
2369         return 0;
2370
2371 nla_put_failure:
2372         return -EMSGSIZE;
2373 }
2374
2375 static struct rtnl_link_ops tun_link_ops __read_mostly = {
2376         .kind           = DRV_NAME,
2377         .priv_size      = sizeof(struct tun_struct),
2378         .setup          = tun_setup,
2379         .validate       = tun_validate,
2380         .get_size       = tun_get_size,
2381         .fill_info      = tun_fill_info,
2382 };
2383
2384 static void tun_sock_write_space(struct sock *sk)
2385 {
2386         struct tun_file *tfile;
2387         wait_queue_head_t *wqueue;
2388
2389         if (!sock_writeable(sk))
2390                 return;
2391
2392         if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2393                 return;
2394
2395         wqueue = sk_sleep(sk);
2396         if (wqueue && waitqueue_active(wqueue))
2397                 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2398                                                 EPOLLWRNORM | EPOLLWRBAND);
2399
2400         tfile = container_of(sk, struct tun_file, sk);
2401         kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2402 }
2403
2404 static void tun_put_page(struct tun_page *tpage)
2405 {
2406         if (tpage->page)
2407                 __page_frag_cache_drain(tpage->page, tpage->count);
2408 }
2409
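     /* Process one XDP buffer submitted through sendmsg() with
      * TUN_MSG_PTR (see tun_sendmsg() below); tpage batches up page
      * frees across a burst of dropped buffers.
      */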
2410 static int tun_xdp_one(struct tun_struct *tun,
2411                        struct tun_file *tfile,
2412                        struct xdp_buff *xdp, int *flush,
2413                        struct tun_page *tpage)
2414 {
2415         unsigned int datasize = xdp->data_end - xdp->data;
2416         struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2417         struct virtio_net_hdr *gso = &hdr->gso;
2418         struct tun_pcpu_stats *stats;
2419         struct bpf_prog *xdp_prog;
2420         struct sk_buff *skb = NULL;
2421         u32 rxhash = 0, act;
2422         int buflen = hdr->buflen;
2423         int err = 0;
2424         bool skb_xdp = false;
2425         struct page *page;
2426
2427         xdp_prog = rcu_dereference(tun->xdp_prog);
2428         if (xdp_prog) {
2429                 if (gso->gso_type) {
2430                         skb_xdp = true;
2431                         goto build;
2432                 }
2433                 xdp_set_data_meta_invalid(xdp);
2434                 xdp->rxq = &tfile->xdp_rxq;
2435
2436                 act = bpf_prog_run_xdp(xdp_prog, xdp);
2437                 err = tun_xdp_act(tun, xdp_prog, xdp, act);
2438                 if (err < 0) {
2439                         put_page(virt_to_head_page(xdp->data));
2440                         return err;
2441                 }
2442
2443                 switch (err) {
2444                 case XDP_REDIRECT:
2445                         *flush = true;
2446                         /* fall through */
2447                 case XDP_TX:
2448                         return 0;
2449                 case XDP_PASS:
2450                         break;
2451                 default:
2452                         page = virt_to_head_page(xdp->data);
2453                         if (tpage->page == page) {
2454                                 ++tpage->count;
2455                         } else {
2456                                 tun_put_page(tpage);
2457                                 tpage->page = page;
2458                                 tpage->count = 1;
2459                         }
2460                         return 0;
2461                 }
2462         }
2463
2464 build:
2465         skb = build_skb(xdp->data_hard_start, buflen);
2466         if (!skb) {
2467                 err = -ENOMEM;
2468                 goto out;
2469         }
2470
2471         skb_reserve(skb, xdp->data - xdp->data_hard_start);
2472         skb_put(skb, xdp->data_end - xdp->data);
2473
2474         if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2475                 this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2476                 kfree_skb(skb);
2477                 err = -EINVAL;
2478                 goto out;
2479         }
2480
2481         skb->protocol = eth_type_trans(skb, tun->dev);
2482         skb_reset_network_header(skb);
2483         skb_probe_transport_header(skb);
2484
2485         if (skb_xdp) {
2486                 err = do_xdp_generic(xdp_prog, skb);
2487                 if (err != XDP_PASS)
2488                         goto out;
2489         }
2490
2491         if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2492             !tfile->detached)
2493                 rxhash = __skb_get_hash_symmetric(skb);
2494
2495         skb_record_rx_queue(skb, tfile->queue_index);
2496         netif_receive_skb(skb);
2497
2498         /* No need for get_cpu_ptr() here since this function is
2499          * always called with bh disabled
2500          */
2501         stats = this_cpu_ptr(tun->pcpu_stats);
2502         u64_stats_update_begin(&stats->syncp);
2503         stats->rx_packets++;
2504         stats->rx_bytes += datasize;
2505         u64_stats_update_end(&stats->syncp);
2506
2507         if (rxhash)
2508                 tun_flow_update(tun, rxhash, tfile);
2509
2510 out:
2511         return err;
2512 }
2513
2514 static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2515 {
2516         int ret, i;
2517         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2518         struct tun_struct *tun = tun_get(tfile);
2519         struct tun_msg_ctl *ctl = m->msg_control;
2520         struct xdp_buff *xdp;
2521
2522         if (!tun)
2523                 return -EBADFD;
2524
2525         if (ctl && (ctl->type == TUN_MSG_PTR)) {
2526                 struct tun_page tpage;
2527                 int n = ctl->num;
2528                 int flush = 0;
2529
2530                 memset(&tpage, 0, sizeof(tpage));
2531
2532                 local_bh_disable();
2533                 rcu_read_lock();
2534
2535                 for (i = 0; i < n; i++) {
2536                         xdp = &((struct xdp_buff *)ctl->ptr)[i];
2537                         tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2538                 }
2539
2540                 if (flush)
2541                         xdp_do_flush_map();
2542
2543                 rcu_read_unlock();
2544                 local_bh_enable();
2545
2546                 tun_put_page(&tpage);
2547
2548                 ret = total_len;
2549                 goto out;
2550         }
2551
2552         ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2553                            m->msg_flags & MSG_DONTWAIT,
2554                            m->msg_flags & MSG_MORE);
2555 out:
2556         tun_put(tun);
2557         return ret;
2558 }
2559
2560 static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2561                        int flags)
2562 {
2563         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2564         struct tun_struct *tun = tun_get(tfile);
2565         void *ptr = m->msg_control;
2566         int ret;
2567
2568         if (!tun) {
2569                 ret = -EBADFD;
2570                 goto out_free;
2571         }
2572
2573         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2574                 ret = -EINVAL;
2575                 goto out_put_tun;
2576         }
2577         if (flags & MSG_ERRQUEUE) {
2578                 ret = sock_recv_errqueue(sock->sk, m, total_len,
2579                                          SOL_PACKET, TUN_TX_TIMESTAMP);
2580                 goto out;
2581         }
2582         ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2583         if (ret > (ssize_t)total_len) {
2584                 m->msg_flags |= MSG_TRUNC;
2585                 ret = flags & MSG_TRUNC ? ret : total_len;
2586         }
2587 out:
2588         tun_put(tun);
2589         return ret;
2590
2591 out_put_tun:
2592         tun_put(tun);
2593 out_free:
2594         tun_ptr_free(ptr);
2595         return ret;
2596 }
2597
2598 static int tun_ptr_peek_len(void *ptr)
2599 {
2600         if (likely(ptr)) {
2601                 if (tun_is_xdp_frame(ptr)) {
2602                         struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2603
2604                         return xdpf->len;
2605                 }
2606                 return __skb_array_len_with_tag(ptr);
2607         } else {
2608                 return 0;
2609         }
2610 }
2611
2612 static int tun_peek_len(struct socket *sock)
2613 {
2614         struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2615         struct tun_struct *tun;
2616         int ret = 0;
2617
2618         tun = tun_get(tfile);
2619         if (!tun)
2620                 return 0;
2621
2622         ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2623         tun_put(tun);
2624
2625         return ret;
2626 }
2627
2628 /* Ops structure to mimic raw sockets with tun */
2629 static const struct proto_ops tun_socket_ops = {
2630         .peek_len = tun_peek_len,
2631         .sendmsg = tun_sendmsg,
2632         .recvmsg = tun_recvmsg,
2633 };
2634
2635 static struct proto tun_proto = {
2636         .name           = "tun",
2637         .owner          = THIS_MODULE,
2638         .obj_size       = sizeof(struct tun_file),
2639 };
2640
2641 static int tun_flags(struct tun_struct *tun)
2642 {
2643         return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2644 }
2645
2646 static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2647                               char *buf)
2648 {
2649         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2650         return sprintf(buf, "0x%x\n", tun_flags(tun));
2651 }
2652
2653 static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2654                               char *buf)
2655 {
2656         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2657         return uid_valid(tun->owner) ?
2658                 sprintf(buf, "%u\n",
2659                         from_kuid_munged(current_user_ns(), tun->owner)) :
2660                 sprintf(buf, "-1\n");
2661 }
2662
2663 static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2664                               char *buf)
2665 {
2666         struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2667         return gid_valid(tun->group) ?
2668                 sprintf(buf, "%u\n",
2669                         from_kgid_munged(current_user_ns(), tun->group)) :
2670                 sprintf(buf, "-1\n");
2671 }
2672
2673 static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2674 static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2675 static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2676
2677 static struct attribute *tun_dev_attrs[] = {
2678         &dev_attr_tun_flags.attr,
2679         &dev_attr_owner.attr,
2680         &dev_attr_group.attr,
2681         NULL
2682 };
2683
2684 static const struct attribute_group tun_attr_group = {
2685         .attrs = tun_dev_attrs
2686 };
2687
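     /* A minimal userspace sketch of how this path is reached (assuming
      * the usual /dev/net/tun clone device; error handling omitted):
      *
      *   int fd = open("/dev/net/tun", O_RDWR);
      *   struct ifreq ifr = { 0 };
      *
      *   ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
      *   ioctl(fd, TUNSETIFF, &ifr); // an empty ifr_name picks "tun%d"
      */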
2688 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2689 {
2690         struct tun_struct *tun;
2691         struct tun_file *tfile = file->private_data;
2692         struct net_device *dev;
2693         int err;
2694
2695         if (tfile->detached)
2696                 return -EINVAL;
2697
2698         if (ifr->ifr_flags & IFF_NAPI_FRAGS) {
2699                 if (!capable(CAP_NET_ADMIN))
2700                         return -EPERM;
2701
2702                 if (!(ifr->ifr_flags & IFF_NAPI) ||
2703                     (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2704                         return -EINVAL;
2705         }
2706
2707         dev = __dev_get_by_name(net, ifr->ifr_name);
2708         if (dev) {
2709                 if (ifr->ifr_flags & IFF_TUN_EXCL)
2710                         return -EBUSY;
2711                 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2712                         tun = netdev_priv(dev);
2713                 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2714                         tun = netdev_priv(dev);
2715                 else
2716                         return -EINVAL;
2717
2718                 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2719                     !!(tun->flags & IFF_MULTI_QUEUE))
2720                         return -EINVAL;
2721
2722                 if (tun_not_capable(tun))
2723                         return -EPERM;
2724                 err = security_tun_dev_open(tun->security);
2725                 if (err < 0)
2726                         return err;
2727
2728                 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2729                                  ifr->ifr_flags & IFF_NAPI,
2730                                  ifr->ifr_flags & IFF_NAPI_FRAGS);
2731                 if (err < 0)
2732                         return err;
2733
2734                 if (tun->flags & IFF_MULTI_QUEUE &&
2735                     (tun->numqueues + tun->numdisabled > 1)) {
2736                         /* One or more queues have already been attached; no
2737                          * need to initialize the device again.
2738                          */
2739                         netdev_state_change(dev);
2740                         return 0;
2741                 }
2742
2743                 tun->flags = (tun->flags & ~TUN_FEATURES) |
2744                               (ifr->ifr_flags & TUN_FEATURES);
2745
2746                 netdev_state_change(dev);
2747         } else {
2748                 char *name;
2749                 unsigned long flags = 0;
2750                 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2751                              MAX_TAP_QUEUES : 1;
2752
2753                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2754                         return -EPERM;
2755                 err = security_tun_dev_create();
2756                 if (err < 0)
2757                         return err;
2758
2759                 /* Set dev type */
2760                 if (ifr->ifr_flags & IFF_TUN) {
2761                         /* TUN device */
2762                         flags |= IFF_TUN;
2763                         name = "tun%d";
2764                 } else if (ifr->ifr_flags & IFF_TAP) {
2765                         /* TAP device */
2766                         flags |= IFF_TAP;
2767                         name = "tap%d";
2768                 } else
2769                         return -EINVAL;
2770
2771                 if (*ifr->ifr_name)
2772                         name = ifr->ifr_name;
2773
2774                 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2775                                        NET_NAME_UNKNOWN, tun_setup, queues,
2776                                        queues);
2777
2778                 if (!dev)
2779                         return -ENOMEM;
2780                 err = dev_get_valid_name(net, dev, name);
2781                 if (err < 0)
2782                         goto err_free_dev;
2783
2784                 dev_net_set(dev, net);
2785                 dev->rtnl_link_ops = &tun_link_ops;
2786                 dev->ifindex = tfile->ifindex;
2787                 dev->sysfs_groups[0] = &tun_attr_group;
2788
2789                 tun = netdev_priv(dev);
2790                 tun->dev = dev;
2791                 tun->flags = flags;
2792                 tun->txflt.count = 0;
2793                 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2794
2795                 tun->align = NET_SKB_PAD;
2796                 tun->filter_attached = false;
2797                 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2798                 tun->rx_batched = 0;
2799                 RCU_INIT_POINTER(tun->steering_prog, NULL);
2800
2801                 tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2802                 if (!tun->pcpu_stats) {
2803                         err = -ENOMEM;
2804                         goto err_free_dev;
2805                 }
2806
2807                 spin_lock_init(&tun->lock);
2808
2809                 err = security_tun_dev_alloc_security(&tun->security);
2810                 if (err < 0)
2811                         goto err_free_stat;
2812
2813                 tun_net_init(dev);
2814                 tun_flow_init(tun);
2815
2816                 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
2817                                    TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2818                                    NETIF_F_HW_VLAN_STAG_TX;
2819                 dev->features = dev->hw_features | NETIF_F_LLTX;
2820                 dev->vlan_features = dev->features &
2821                                      ~(NETIF_F_HW_VLAN_CTAG_TX |
2822                                        NETIF_F_HW_VLAN_STAG_TX);
2823
2824                 tun->flags = (tun->flags & ~TUN_FEATURES) |
2825                               (ifr->ifr_flags & TUN_FEATURES);
2826
2827                 INIT_LIST_HEAD(&tun->disabled);
2828                 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2829                                  ifr->ifr_flags & IFF_NAPI_FRAGS);
2830                 if (err < 0)
2831                         goto err_free_flow;
2832
2833                 err = register_netdevice(tun->dev);
2834                 if (err < 0)
2835                         goto err_detach;
2836         }
2837
2838         netif_carrier_on(tun->dev);
2839
2840         tun_debug(KERN_INFO, tun, "tun_set_iff\n");
2841
2842         /* Make sure persistent devices do not get stuck in
2843          * xoff state.
2844          */
2845         if (netif_running(tun->dev))
2846                 netif_tx_wake_all_queues(tun->dev);
2847
2848         strcpy(ifr->ifr_name, tun->dev->name);
2849         return 0;
2850
2851 err_detach:
2852         tun_detach_all(dev);
2853         /* register_netdevice() already called tun_free_netdev() */
2854         goto err_free_dev;
2855
2856 err_free_flow:
2857         tun_flow_uninit(tun);
2858         security_tun_dev_free_security(tun->security);
2859 err_free_stat:
2860         free_percpu(tun->pcpu_stats);
2861 err_free_dev:
2862         free_netdev(dev);
2863         return err;
2864 }
2865
2866 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2867 {
2868         tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2869
2870         strcpy(ifr->ifr_name, tun->dev->name);
2871
2872         ifr->ifr_flags = tun_flags(tun);
2874 }
2875
2876 /* This is like a cut-down ethtool ops, except done via tun fd so no
2877  * privs required. */
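     /* e.g. ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4) enables
      * checksum offload plus IPv4 TSO, while any unknown bit makes the
      * call fail with -EINVAL (a usage sketch of the parsing below).
      */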
2878 static int set_offload(struct tun_struct *tun, unsigned long arg)
2879 {
2880         netdev_features_t features = 0;
2881
2882         if (arg & TUN_F_CSUM) {
2883                 features |= NETIF_F_HW_CSUM;
2884                 arg &= ~TUN_F_CSUM;
2885
2886                 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2887                         if (arg & TUN_F_TSO_ECN) {
2888                                 features |= NETIF_F_TSO_ECN;
2889                                 arg &= ~TUN_F_TSO_ECN;
2890                         }
2891                         if (arg & TUN_F_TSO4)
2892                                 features |= NETIF_F_TSO;
2893                         if (arg & TUN_F_TSO6)
2894                                 features |= NETIF_F_TSO6;
2895                         arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2896                 }
2897
2898                 arg &= ~TUN_F_UFO;
2899         }
2900
2901         /* This gives the user a way to test for new features in the
2902          * future by trying to set them. */
2903         if (arg)
2904                 return -EINVAL;
2905
2906         tun->set_features = features;
2907         tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2908         tun->dev->wanted_features |= features;
2909         netdev_update_features(tun->dev);
2910
2911         return 0;
2912 }
2913
2914 static void tun_detach_filter(struct tun_struct *tun, int n)
2915 {
2916         int i;
2917         struct tun_file *tfile;
2918
2919         for (i = 0; i < n; i++) {
2920                 tfile = rtnl_dereference(tun->tfiles[i]);
2921                 lock_sock(tfile->socket.sk);
2922                 sk_detach_filter(tfile->socket.sk);
2923                 release_sock(tfile->socket.sk);
2924         }
2925
2926         tun->filter_attached = false;
2927 }
2928
2929 static int tun_attach_filter(struct tun_struct *tun)
2930 {
2931         int i, ret = 0;
2932         struct tun_file *tfile;
2933
2934         for (i = 0; i < tun->numqueues; i++) {
2935                 tfile = rtnl_dereference(tun->tfiles[i]);
2936                 lock_sock(tfile->socket.sk);
2937                 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2938                 release_sock(tfile->socket.sk);
2939                 if (ret) {
2940                         tun_detach_filter(tun, i);
2941                         return ret;
2942                 }
2943         }
2944
2945         tun->filter_attached = true;
2946         return ret;
2947 }
2948
2949 static void tun_set_sndbuf(struct tun_struct *tun)
2950 {
2951         struct tun_file *tfile;
2952         int i;
2953
2954         for (i = 0; i < tun->numqueues; i++) {
2955                 tfile = rtnl_dereference(tun->tfiles[i]);
2956                 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2957         }
2958 }
2959
2960 static int tun_set_queue(struct file *file, struct ifreq *ifr)
2961 {
2962         struct tun_file *tfile = file->private_data;
2963         struct tun_struct *tun;
2964         int ret = 0;
2965
2966         rtnl_lock();
2967
2968         if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2969                 tun = tfile->detached;
2970                 if (!tun) {
2971                         ret = -EINVAL;
2972                         goto unlock;
2973                 }
2974                 ret = security_tun_dev_attach_queue(tun->security);
2975                 if (ret < 0)
2976                         goto unlock;
2977                 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2978                                  tun->flags & IFF_NAPI_FRAGS);
2979         } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2980                 tun = rtnl_dereference(tfile->tun);
2981                 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2982                         ret = -EINVAL;
2983                 else
2984                         __tun_detach(tfile, false);
2985         } else
2986                 ret = -EINVAL;
2987
2988         if (ret >= 0)
2989                 netdev_state_change(tun->dev);
2990
2991 unlock:
2992         rtnl_unlock();
2993         return ret;
2994 }
2995
2996 static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
2997                         void __user *data)
2998 {
2999         struct bpf_prog *prog;
3000         int fd;
3001
3002         if (copy_from_user(&fd, data, sizeof(fd)))
3003                 return -EFAULT;
3004
3005         if (fd == -1) {
3006                 prog = NULL;
3007         } else {
3008                 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3009                 if (IS_ERR(prog))
3010                         return PTR_ERR(prog);
3011         }
3012
3013         return __tun_set_ebpf(tun, prog_p, prog);
3014 }
3015
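     /* Common implementation behind the tun ioctls (ifreq_len allows a
      * caller to pass a smaller, compat-layout struct ifreq); e.g. an
      * ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd) request reaches
      * tun_set_ebpf() above, and prog_fd == -1 detaches the program.
      */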
3016 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3017                             unsigned long arg, int ifreq_len)
3018 {
3019         struct tun_file *tfile = file->private_data;
3020         struct net *net = sock_net(&tfile->sk);
3021         struct tun_struct *tun;
3022         void __user *argp = (void __user *)arg;
3023         unsigned int ifindex, carrier;
3024         struct ifreq ifr;
3025         kuid_t owner;
3026         kgid_t group;
3027         int sndbuf;
3028         int vnet_hdr_sz;
3029         int le;
3030         int ret;
3031         bool do_notify = false;
3032
3033         if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3034             (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3035                 if (copy_from_user(&ifr, argp, ifreq_len))
3036                         return -EFAULT;
3037         } else {
3038                 memset(&ifr, 0, sizeof(ifr));
3039         }
3040         if (cmd == TUNGETFEATURES) {
3041                 /* Currently this just means: "what IFF flags are valid?".
3042                  * This is needed because we never checked for invalid flags on
3043                  * TUNSETIFF.
3044                  */
3045                 return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3046                                 (unsigned int __user *)argp);
3047         } else if (cmd == TUNSETQUEUE) {
3048                 return tun_set_queue(file, &ifr);
3049         } else if (cmd == SIOCGSKNS) {
3050                 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3051                         return -EPERM;
3052                 return open_related_ns(&net->ns, get_net_ns);
3053         }
3054
3055         ret = 0;
3056         rtnl_lock();
3057
3058         tun = tun_get(tfile);
3059         if (cmd == TUNSETIFF) {
3060                 ret = -EEXIST;
3061                 if (tun)
3062                         goto unlock;
3063
3064                 ifr.ifr_name[IFNAMSIZ-1] = '\0';
3065
3066                 ret = tun_set_iff(net, file, &ifr);
3067
3068                 if (ret)
3069                         goto unlock;
3070
3071                 if (copy_to_user(argp, &ifr, ifreq_len))
3072                         ret = -EFAULT;
3073                 goto unlock;
3074         }
3075         if (cmd == TUNSETIFINDEX) {
3076                 ret = -EPERM;
3077                 if (tun)
3078                         goto unlock;
3079
3080                 ret = -EFAULT;
3081                 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3082                         goto unlock;
3083
3084                 ret = 0;
3085                 tfile->ifindex = ifindex;
3086                 goto unlock;
3087         }
3088
3089         ret = -EBADFD;
3090         if (!tun)
3091                 goto unlock;
3092
3093         tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
3094
3095         net = dev_net(tun->dev);
3096         ret = 0;
3097         switch (cmd) {
3098         case TUNGETIFF:
3099                 tun_get_iff(tun, &ifr);
3100
3101                 if (tfile->detached)
3102                         ifr.ifr_flags |= IFF_DETACH_QUEUE;
3103                 if (!tfile->socket.sk->sk_filter)
3104                         ifr.ifr_flags |= IFF_NOFILTER;
3105
3106                 if (copy_to_user(argp, &ifr, ifreq_len))
3107                         ret = -EFAULT;
3108                 break;
3109
3110         case TUNSETNOCSUM:
3111                 /* Disable/Enable checksum */
3112
3113                 /* [unimplemented] */
3114                 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
3115                           arg ? "disabled" : "enabled");
3116                 break;
3117
3118         case TUNSETPERSIST:
3119                 /* Disable/Enable persist mode. Keep an extra reference to the
3120                  * module to prevent it from being unloaded.
3121                  */
3122                 if (arg && !(tun->flags & IFF_PERSIST)) {
3123                         tun->flags |= IFF_PERSIST;
3124                         __module_get(THIS_MODULE);
3125                         do_notify = true;
3126                 }
3127                 if (!arg && (tun->flags & IFF_PERSIST)) {
3128                         tun->flags &= ~IFF_PERSIST;
3129                         module_put(THIS_MODULE);
3130                         do_notify = true;
3131                 }
3132
3133                 tun_debug(KERN_INFO, tun, "persist %s\n",
3134                           arg ? "enabled" : "disabled");
3135                 break;
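	/* Illustrative userspace sketch (editor's example): making a device
	 * outlive its creating process, much as "ip tuntap add mode tun"
	 * arranges:
	 *
	 *	ioctl(fd, TUNSETPERSIST, 1);	(device survives close(fd))
	 *	ioctl(fd, TUNSETPERSIST, 0);	(device is transient again)
	 */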

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;
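	/* Illustrative userspace sketch (editor's example): restricting a
	 * persistent device so an unprivileged user can attach to it later;
	 * note both ioctls pass the id by value, and uid/gid here are
	 * placeholders:
	 *
	 *	ioctl(fd, TUNSETOWNER, (unsigned long)uid);
	 *	ioctl(fd, TUNSETGROUP, (unsigned long)gid);
	 */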

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int)arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;
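	/* Illustrative userspace sketch (editor's example): a virtio-net
	 * style backend enabling checksum and TSO offloads; the flags are
	 * the TUN_F_* constants from <linux/if_tun.h>, and TUN_F_CSUM is a
	 * prerequisite for the TSO bits:
	 *
	 *	ioctl(fd, TUNSETOFFLOAD,
	 *	      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
	 */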

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;
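	/* Illustrative userspace sketch (editor's example): shrinking the
	 * per-queue send buffer, which bounds how much data a single writer
	 * can have in flight; unlike TUNSETOWNER, the argument is passed by
	 * pointer as a plain int in bytes:
	 *
	 *	int sndbuf = 1 << 20;
	 *
	 *	ioctl(fd, TUNSETSNDBUF, &sndbuf);
	 */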

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;
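	/* Illustrative userspace sketch (editor's example): an IFF_VNET_HDR
	 * user growing the header to the mergeable-buffers layout before
	 * negotiating VIRTIO_NET_F_MRG_RXBUF with its guest:
	 *
	 *	int hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	 *
	 *	ioctl(fd, TUNSETVNETHDRSZ, &hdr_sz);
	 */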

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;
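	/* Illustrative userspace sketch (editor's example): attaching a
	 * classic BPF filter to a TAP device; this program accepts every
	 * frame and exists only to show the sock_fprog plumbing:
	 *
	 *	struct sock_filter accept_all = BPF_STMT(BPF_RET | BPF_K, ~0U);
	 *	struct sock_fprog fprog = { .len = 1, .filter = &accept_all };
	 *
	 *	ioctl(fd, TUNATTACHFILTER, &fprog);
	 */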

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;
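	/* Illustrative userspace sketch (editor's example): a VPN daemon
	 * dropping carrier while its transport is down, so routing daemons
	 * observe the link-state change:
	 *
	 *	int carrier = 0;
	 *
	 *	ioctl(fd, TUNSETCARRIER, &carrier);
	 */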

	case TUNGETDEVNETNS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto unlock;
		ret = open_related_ns(&net->ns, get_net_ns);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible, though, so we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else {
		tfile->flags &= ~TUN_FASYNC;
	}
	ret = 0;
out:
	return ret;
}
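/* Illustrative userspace sketch (editor's example): requesting SIGIO on
 * readable packets, which is the path that lands in tun_chr_fasync():
 *
 *	signal(SIGIO, handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */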

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->socket.wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed		= SPEED_10;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.port		= PORT_TP;
	cmd->base.phy_address	= 0;
	cmd->base.autoneg	= AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	return tun->debug;
#else
	/* -EOPNOTSUPP is implicitly converted to u32 here; msglevel is
	 * effectively unsupported without TUN_DEBUG.
	 */
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	tun->debug = value;
#endif
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}
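/* Illustrative sketch (editor's example): rx_batched is exposed through the
 * standard coalescing interface, so it can be tuned from the shell; values
 * above NAPI_POLL_WEIGHT (64) are silently clamped by the code above:
 *
 *	ethtool -C tun0 rx-frames 32
 */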

static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce	= tun_get_coalesce,
	.set_coalesce	= tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}
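/* Illustrative sketch (editor's example): the resize above runs when the
 * queue length of the device changes, e.g. from the shell:
 *
 *	ip link set dev tun0 txqueuelen 2000
 *
 * The notifier below maps NETDEV_CHANGE_TX_QUEUE_LEN onto it, resizing the
 * per-queue ptr_rings of active and detached queues alike.
 */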

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file.  Returns an ERR_PTR unless
 * the file is a TUN/TAP character device with its private data set.  The
 * returned object works like a packet socket: it can be used for
 * sock_sendmsg()/sock_recvmsg().  The caller is responsible for holding a
 * reference to the file for as long as the socket is in use.
 */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
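/* Illustrative in-kernel sketch (editor's example): this export is the hook
 * a vhost-net style backend uses to talk to a queue directly, bypassing the
 * character device read/write path; "tun_file_ptr" is a placeholder for a
 * struct file the caller already holds a reference on:
 *
 *	struct socket *sock = tun_get_socket(tun_file_ptr);
 *
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg);
 */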

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");