net: tap: change tap_alloc_skb() to allow bigger paged allocations
drivers/net/tap.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/etherdevice.h>
3 #include <linux/if_tap.h>
4 #include <linux/if_vlan.h>
5 #include <linux/interrupt.h>
6 #include <linux/nsproxy.h>
7 #include <linux/compat.h>
8 #include <linux/if_tun.h>
9 #include <linux/module.h>
10 #include <linux/skbuff.h>
11 #include <linux/cache.h>
12 #include <linux/sched/signal.h>
13 #include <linux/types.h>
14 #include <linux/slab.h>
15 #include <linux/wait.h>
16 #include <linux/cdev.h>
17 #include <linux/idr.h>
18 #include <linux/fs.h>
19 #include <linux/uio.h>
20
21 #include <net/gso.h>
22 #include <net/net_namespace.h>
23 #include <net/rtnetlink.h>
24 #include <net/sock.h>
25 #include <linux/virtio_net.h>
26 #include <linux/skb_array.h>
27
28 #define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
29
30 #define TAP_VNET_LE 0x80000000
31 #define TAP_VNET_BE 0x40000000
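/* TAP_VNET_LE and TAP_VNET_BE live in the topmost bits of q->flags,
 * far above the IFF_* interface flags stored in the same word, so the
 * two namespaces cannot collide.
 */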
32
33 #ifdef CONFIG_TUN_VNET_CROSS_LE
34 static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
35 {
36         return q->flags & TAP_VNET_BE ? false :
37                 virtio_legacy_is_little_endian();
38 }
39
40 static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
41 {
42         int s = !!(q->flags & TAP_VNET_BE);
43
44         if (put_user(s, sp))
45                 return -EFAULT;
46
47         return 0;
48 }
49
50 static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
51 {
52         int s;
53
54         if (get_user(s, sp))
55                 return -EFAULT;
56
57         if (s)
58                 q->flags |= TAP_VNET_BE;
59         else
60                 q->flags &= ~TAP_VNET_BE;
61
62         return 0;
63 }
64 #else
65 static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
66 {
67         return virtio_legacy_is_little_endian();
68 }
69
70 static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
71 {
72         return -EINVAL;
73 }
74
75 static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
76 {
77         return -EINVAL;
78 }
79 #endif /* CONFIG_TUN_VNET_CROSS_LE */
80
81 static inline bool tap_is_little_endian(struct tap_queue *q)
82 {
83         return q->flags & TAP_VNET_LE ||
84                 tap_legacy_is_little_endian(q);
85 }
86
87 static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
88 {
89         return __virtio16_to_cpu(tap_is_little_endian(q), val);
90 }
91
92 static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
93 {
94         return __cpu_to_virtio16(tap_is_little_endian(q), val);
95 }
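/* How the helpers above combine: if user space negotiated virtio 1.0 it
 * sets TAP_VNET_LE via TUNSETVNETLE and all vnet header fields are
 * little-endian; otherwise the legacy rule applies and headers use the
 * host's native byte order, unless TAP_VNET_BE overrides that on
 * CONFIG_TUN_VNET_CROSS_LE builds.
 */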
96
97 static struct proto tap_proto = {
98         .name = "tap",
99         .owner = THIS_MODULE,
100         .obj_size = sizeof(struct tap_queue),
101 };
102
103 #define TAP_NUM_DEVS (1U << MINORBITS)
104
105 static LIST_HEAD(major_list);
106
107 struct major_info {
108         struct rcu_head rcu;
109         dev_t major;
110         struct idr minor_idr;
111         spinlock_t minor_lock;
112         const char *device_name;
113         struct list_head next;
114 };
115
116 #define GOODCOPY_LEN 128
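/* For zerocopy transmits, tap_get_user() still copies up to
 * GOODCOPY_LEN header bytes into the skb's linear area: copying a short
 * header is typically cheaper than pinning a user page for it, and the
 * stack expects to find headers in the linear area anyway.
 */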
117
118 static const struct proto_ops tap_socket_ops;
119
120 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
121 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
122
123 static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
124 {
125         return rcu_dereference(dev->rx_handler_data);
126 }
127
128 /*
129  * RCU usage:
130  * The tap_queue and the macvlan_dev are loosely coupled, the
131  * pointers from one to the other can only be read while rcu_read_lock
132  * or rtnl is held.
133  *
134  * Both the file and the macvlan_dev hold a reference on the tap_queue
135  * through sock_hold(&q->sk). When the macvlan_dev goes away first,
136  * q->tap becomes inaccessible. When the file gets closed,
137  * tap_get_queue() fails.
138  *
139  * There may still be references to the struct sock inside of the
140  * queue from outbound SKBs, but these never reference back to the
141  * file or the dev. The data structure is freed through __sk_free
142  * when both our references and any pending SKBs are gone.
143  */
144
145 static int tap_enable_queue(struct tap_dev *tap, struct file *file,
146                             struct tap_queue *q)
147 {
148         int err = -EINVAL;
149
150         ASSERT_RTNL();
151
152         if (q->enabled)
153                 goto out;
154
155         err = 0;
156         rcu_assign_pointer(tap->taps[tap->numvtaps], q);
157         q->queue_index = tap->numvtaps;
158         q->enabled = true;
159
160         tap->numvtaps++;
161 out:
162         return err;
163 }
164
165 /* Requires RTNL */
166 static int tap_set_queue(struct tap_dev *tap, struct file *file,
167                          struct tap_queue *q)
168 {
169         if (tap->numqueues == MAX_TAP_QUEUES)
170                 return -EBUSY;
171
172         rcu_assign_pointer(q->tap, tap);
173         rcu_assign_pointer(tap->taps[tap->numvtaps], q);
174         sock_hold(&q->sk);
175
176         q->file = file;
177         q->queue_index = tap->numvtaps;
178         q->enabled = true;
179         file->private_data = q;
180         list_add_tail(&q->next, &tap->queue_list);
181
182         tap->numvtaps++;
183         tap->numqueues++;
184
185         return 0;
186 }
187
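/* Disable a queue by moving the last enabled queue into its slot, so
 * that tap->taps[0..numvtaps-1] stays densely packed for the lookups
 * done in tap_get_queue().
 */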
188 static int tap_disable_queue(struct tap_queue *q)
189 {
190         struct tap_dev *tap;
191         struct tap_queue *nq;
192
193         ASSERT_RTNL();
194         if (!q->enabled)
195                 return -EINVAL;
196
197         tap = rtnl_dereference(q->tap);
198
199         if (tap) {
200                 int index = q->queue_index;
201                 BUG_ON(index >= tap->numvtaps);
202                 nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
203                 nq->queue_index = index;
204
205                 rcu_assign_pointer(tap->taps[index], nq);
206                 RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
207                 q->enabled = false;
208
209                 tap->numvtaps--;
210         }
211
212         return 0;
213 }
214
215 /*
216  * The file owning the queue got closed, give up both
217  * the reference that the file holds as well as the
218  * one from the macvlan_dev if that still exists.
219  *
220  * Holding the RTNL lock makes sure that we don't get
221  * to the queue again after destroying it.
222  */
223 static void tap_put_queue(struct tap_queue *q)
224 {
225         struct tap_dev *tap;
226
227         rtnl_lock();
228         tap = rtnl_dereference(q->tap);
229
230         if (tap) {
231                 if (q->enabled)
232                         BUG_ON(tap_disable_queue(q));
233
234                 tap->numqueues--;
235                 RCU_INIT_POINTER(q->tap, NULL);
236                 sock_put(&q->sk);
237                 list_del_init(&q->next);
238         }
239
240         rtnl_unlock();
241
242         synchronize_rcu();
243         sock_put(&q->sk);
244 }
245
246 /*
247  * Select a queue based on the packet's flow hash; if there is no
248  * hash, fall back to the rx queue recorded on the skb. If all fails,
249  * use the first available queue.
250  * Cache tap->numvtaps since it can become zero during the execution
251  * of this function.
252  */
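/* Example: with numvtaps == 4 and skb_get_hash() == 4147, the frame is
 * queued on taps[4147 % 4] == taps[3]; a zero hash falls back to the
 * recorded rx queue, and a single queue short-circuits to taps[0].
 */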
253 static struct tap_queue *tap_get_queue(struct tap_dev *tap,
254                                        struct sk_buff *skb)
255 {
256         struct tap_queue *queue = NULL;
257         /* Access to taps array is protected by rcu, but access to numvtaps
258          * isn't. Below we use it to lookup a queue, but treat it as a hint
259          * and validate that the result isn't NULL - in case we are
260          * racing against queue removal.
261          */
262         int numvtaps = READ_ONCE(tap->numvtaps);
263         __u32 rxq;
264
265         if (!numvtaps)
266                 goto out;
267
268         if (numvtaps == 1)
269                 goto single;
270
271         /* Check if we can use flow to select a queue */
272         rxq = skb_get_hash(skb);
273         if (rxq) {
274                 queue = rcu_dereference(tap->taps[rxq % numvtaps]);
275                 goto out;
276         }
277
278         if (likely(skb_rx_queue_recorded(skb))) {
279                 rxq = skb_get_rx_queue(skb);
280
281                 while (unlikely(rxq >= numvtaps))
282                         rxq -= numvtaps;
283
284                 queue = rcu_dereference(tap->taps[rxq]);
285                 goto out;
286         }
287
288 single:
289         queue = rcu_dereference(tap->taps[0]);
290 out:
291         return queue;
292 }
293
294 /*
295  * The net_device is going away, give up the reference
296  * that it holds on all queues and safely set the pointer
297  * from the queues to NULL.
298  */
299 void tap_del_queues(struct tap_dev *tap)
300 {
301         struct tap_queue *q, *tmp;
302
303         ASSERT_RTNL();
304         list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
305                 list_del_init(&q->next);
306                 RCU_INIT_POINTER(q->tap, NULL);
307                 if (q->enabled)
308                         tap->numvtaps--;
309                 tap->numqueues--;
310                 sock_put(&q->sk);
311         }
312         BUG_ON(tap->numvtaps);
313         BUG_ON(tap->numqueues);
314         /* guarantee that any future tap_set_queue will fail */
315         tap->numvtaps = MAX_TAP_QUEUES;
316 }
317 EXPORT_SYMBOL_GPL(tap_del_queues);
318
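/* rx_handler attached to the lower device: steal each frame arriving
 * there and queue it on one of the tap queues. If the frame needs a GSO
 * offload that user space has not accepted via TUNSETOFFLOAD, segment
 * it in software first.
 */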
319 rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
320 {
321         struct sk_buff *skb = *pskb;
322         struct net_device *dev = skb->dev;
323         struct tap_dev *tap;
324         struct tap_queue *q;
325         netdev_features_t features = TAP_FEATURES;
326         enum skb_drop_reason drop_reason;
327
328         tap = tap_dev_get_rcu(dev);
329         if (!tap)
330                 return RX_HANDLER_PASS;
331
332         q = tap_get_queue(tap, skb);
333         if (!q)
334                 return RX_HANDLER_PASS;
335
336         skb_push(skb, ETH_HLEN);
337
338         /* Apply the forward feature mask so that we perform segmentation
339          * according to the user's wishes.  This only works if VNET_HDR is
340          * enabled.
341          */
342         if (q->flags & IFF_VNET_HDR)
343                 features |= tap->tap_features;
344         if (netif_needs_gso(skb, features)) {
345                 struct sk_buff *segs = __skb_gso_segment(skb, features, false);
346                 struct sk_buff *next;
347
348                 if (IS_ERR(segs)) {
349                         drop_reason = SKB_DROP_REASON_SKB_GSO_SEG;
350                         goto drop;
351                 }
352
353                 if (!segs) {
354                         if (ptr_ring_produce(&q->ring, skb)) {
355                                 drop_reason = SKB_DROP_REASON_FULL_RING;
356                                 goto drop;
357                         }
358                         goto wake_up;
359                 }
360
361                 consume_skb(skb);
362                 skb_list_walk_safe(segs, skb, next) {
363                         skb_mark_not_on_list(skb);
364                         if (ptr_ring_produce(&q->ring, skb)) {
365                                 drop_reason = SKB_DROP_REASON_FULL_RING;
366                                 kfree_skb_reason(skb, drop_reason);
367                                 kfree_skb_list_reason(next, drop_reason);
368                                 break;
369                         }
370                 }
371         } else {
372                 /* If we receive a partial checksum and the tap side
373                  * doesn't support checksum offload, compute the checksum.
374                  * Note: it doesn't matter which checksum feature to
375                  *        check, we either support them all or none.
376                  */
377                 if (skb->ip_summed == CHECKSUM_PARTIAL &&
378                     !(features & NETIF_F_CSUM_MASK) &&
379                     skb_checksum_help(skb)) {
380                         drop_reason = SKB_DROP_REASON_SKB_CSUM;
381                         goto drop;
382                 }
383                 if (ptr_ring_produce(&q->ring, skb)) {
384                         drop_reason = SKB_DROP_REASON_FULL_RING;
385                         goto drop;
386                 }
387         }
388
389 wake_up:
390         wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
391         return RX_HANDLER_CONSUMED;
392
393 drop:
394         /* Count errors/drops only here, thus don't care about args. */
395         if (tap->count_rx_dropped)
396                 tap->count_rx_dropped(tap);
397         kfree_skb_reason(skb, drop_reason);
398         return RX_HANDLER_CONSUMED;
399 }
400 EXPORT_SYMBOL_GPL(tap_handle_frame);
401
402 static struct major_info *tap_get_major(int major)
403 {
404         struct major_info *tap_major;
405
406         list_for_each_entry_rcu(tap_major, &major_list, next) {
407                 if (tap_major->major == major)
408                         return tap_major;
409         }
410
411         return NULL;
412 }
413
414 int tap_get_minor(dev_t major, struct tap_dev *tap)
415 {
416         int retval = -ENOMEM;
417         struct major_info *tap_major;
418
419         rcu_read_lock();
420         tap_major = tap_get_major(MAJOR(major));
421         if (!tap_major) {
422                 retval = -EINVAL;
423                 goto unlock;
424         }
425
426         spin_lock(&tap_major->minor_lock);
427         retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
428         if (retval >= 0) {
429                 tap->minor = retval;
430         } else if (retval == -ENOSPC) {
431                 netdev_err(tap->dev, "Too many tap devices\n");
432                 retval = -EINVAL;
433         }
434         spin_unlock(&tap_major->minor_lock);
435
436 unlock:
437         rcu_read_unlock();
438         return retval < 0 ? retval : 0;
439 }
440 EXPORT_SYMBOL_GPL(tap_get_minor);
441
442 void tap_free_minor(dev_t major, struct tap_dev *tap)
443 {
444         struct major_info *tap_major;
445
446         rcu_read_lock();
447         tap_major = tap_get_major(MAJOR(major));
448         if (!tap_major) {
449                 goto unlock;
450         }
451
452         spin_lock(&tap_major->minor_lock);
453         if (tap->minor) {
454                 idr_remove(&tap_major->minor_idr, tap->minor);
455                 tap->minor = 0;
456         }
457         spin_unlock(&tap_major->minor_lock);
458
459 unlock:
460         rcu_read_unlock();
461 }
462 EXPORT_SYMBOL_GPL(tap_free_minor);
463
464 static struct tap_dev *dev_get_by_tap_file(int major, int minor)
465 {
466         struct net_device *dev = NULL;
467         struct tap_dev *tap;
468         struct major_info *tap_major;
469
470         rcu_read_lock();
471         tap_major = tap_get_major(major);
472         if (!tap_major) {
473                 tap = NULL;
474                 goto unlock;
475         }
476
477         spin_lock(&tap_major->minor_lock);
478         tap = idr_find(&tap_major->minor_idr, minor);
479         if (tap) {
480                 dev = tap->dev;
481                 dev_hold(dev);
482         }
483         spin_unlock(&tap_major->minor_lock);
484
485 unlock:
486         rcu_read_unlock();
487         return tap;
488 }
489
490 static void tap_sock_write_space(struct sock *sk)
491 {
492         wait_queue_head_t *wqueue;
493
494         if (!sock_writeable(sk) ||
495             !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
496                 return;
497
498         wqueue = sk_sleep(sk);
499         if (wqueue && waitqueue_active(wqueue))
500                 wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
501 }
502
503 static void tap_sock_destruct(struct sock *sk)
504 {
505         struct tap_queue *q = container_of(sk, struct tap_queue, sk);
506
507         ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
508 }
509
510 static int tap_open(struct inode *inode, struct file *file)
511 {
512         struct net *net = current->nsproxy->net_ns;
513         struct tap_dev *tap;
514         struct tap_queue *q;
515         int err = -ENODEV;
516
517         rtnl_lock();
518         tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
519         if (!tap)
520                 goto err;
521
522         err = -ENOMEM;
523         q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
524                                              &tap_proto, 0);
525         if (!q)
526                 goto err;
527         if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
528                 sk_free(&q->sk);
529                 goto err;
530         }
531
532         init_waitqueue_head(&q->sock.wq.wait);
533         q->sock.type = SOCK_RAW;
534         q->sock.state = SS_CONNECTED;
535         q->sock.file = file;
536         q->sock.ops = &tap_socket_ops;
537         sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
538         q->sk.sk_write_space = tap_sock_write_space;
539         q->sk.sk_destruct = tap_sock_destruct;
540         q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
541         q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
542
543         /*
544          * So far only KVM virtio_net uses tap; enable zero copy between
545          * the guest kernel and the host kernel when the lower device
546          * supports zerocopy.
547          * The macvlan supports zerocopy iff the lower device supports
548          * zero copy, so we don't have to look at the lower device directly.
549          */
550         if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
551                 sock_set_flag(&q->sk, SOCK_ZEROCOPY);
552
553         err = tap_set_queue(tap, file, q);
554         if (err) {
555                 /* tap_sock_destruct() will take care of freeing ptr_ring */
556                 goto err_put;
557         }
558
559         /* tap groks IOCB_NOWAIT just fine, mark it as such */
560         file->f_mode |= FMODE_NOWAIT;
561
562         dev_put(tap->dev);
563
564         rtnl_unlock();
565         return err;
566
567 err_put:
568         sock_put(&q->sk);
569 err:
570         if (tap)
571                 dev_put(tap->dev);
572
573         rtnl_unlock();
574         return err;
575 }
576
577 static int tap_release(struct inode *inode, struct file *file)
578 {
579         struct tap_queue *q = file->private_data;
580         tap_put_queue(q);
581         return 0;
582 }
583
584 static __poll_t tap_poll(struct file *file, poll_table *wait)
585 {
586         struct tap_queue *q = file->private_data;
587         __poll_t mask = EPOLLERR;
588
589         if (!q)
590                 goto out;
591
592         mask = 0;
593         poll_wait(file, &q->sock.wq.wait, wait);
594
595         if (!ptr_ring_empty(&q->ring))
596                 mask |= EPOLLIN | EPOLLRDNORM;
597
598         if (sock_writeable(&q->sk) ||
599             (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
600              sock_writeable(&q->sk)))
601                 mask |= EPOLLOUT | EPOLLWRNORM;
602
603 out:
604         return mask;
605 }
606
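/* Allocate an skb with @linear bytes in the linear area and the rest in
 * page frags. Per the change named in the commit title, the frags may
 * now use compound pages up to PAGE_ALLOC_COSTLY_ORDER (order-3, i.e.
 * 32KB chunks on 4KB-page systems; the final argument of
 * sock_alloc_send_pskb() caps the page order), and @linear is grown
 * whenever even MAX_SKB_FRAGS such chunks cannot hold the remainder.
 */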
607 static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
608                                             size_t len, size_t linear,
609                                             int noblock, int *err)
610 {
611         struct sk_buff *skb;
612
613         /* Under a page?  Don't bother with paged skb. */
614         if (prepad + len < PAGE_SIZE || !linear)
615                 linear = len;
616
617         if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
618                 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
619         skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
620                                    err, PAGE_ALLOC_COSTLY_ORDER);
621         if (!skb)
622                 return NULL;
623
624         skb_reserve(skb, prepad);
625         skb_put(skb, linear);
626         skb->data_len = len - linear;
627         skb->len += len - linear;
628
629         return skb;
630 }
631
632 /* Neighbour code has some assumptions on HH_DATA_MOD alignment */
633 #define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
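/* With HH_DATA_MOD == 16 this works out to a 2-byte reserve, which also
 * leaves the IP header 4-byte aligned behind the 14-byte ethernet
 * header.
 */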
634
635 /* Get packet from user space buffer */
636 static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
637                             struct iov_iter *from, int noblock)
638 {
639         int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
640         struct sk_buff *skb;
641         struct tap_dev *tap;
642         unsigned long total_len = iov_iter_count(from);
643         unsigned long len = total_len;
644         int err;
645         struct virtio_net_hdr vnet_hdr = { 0 };
646         int vnet_hdr_len = 0;
647         int copylen = 0;
648         int depth;
649         bool zerocopy = false;
650         size_t linear;
651         enum skb_drop_reason drop_reason;
652
653         if (q->flags & IFF_VNET_HDR) {
654                 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
655
656                 err = -EINVAL;
657                 if (len < vnet_hdr_len)
658                         goto err;
659                 len -= vnet_hdr_len;
660
661                 err = -EFAULT;
662                 if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
663                         goto err;
664                 iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
665                 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
666                      tap16_to_cpu(q, vnet_hdr.csum_start) +
667                      tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
668                              tap16_to_cpu(q, vnet_hdr.hdr_len))
669                         vnet_hdr.hdr_len = cpu_to_tap16(q,
670                                  tap16_to_cpu(q, vnet_hdr.csum_start) +
671                                  tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
672                 err = -EINVAL;
673                 if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
674                         goto err;
675         }
676
677         err = -EINVAL;
678         if (unlikely(len < ETH_HLEN))
679                 goto err;
680
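        /* Zerocopy path: only the headers (vnet_hdr.hdr_len, or
         * GOODCOPY_LEN when unset) are copied; the payload is pinned
         * straight from user pages, provided it fits in MAX_SKB_FRAGS.
         * Otherwise we fall back to copying the whole packet below.
         */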
681         if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
682                 struct iov_iter i;
683
684                 copylen = vnet_hdr.hdr_len ?
685                         tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
686                 if (copylen > good_linear)
687                         copylen = good_linear;
688                 else if (copylen < ETH_HLEN)
689                         copylen = ETH_HLEN;
690                 linear = copylen;
691                 i = *from;
692                 iov_iter_advance(&i, copylen);
693                 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
694                         zerocopy = true;
695         }
696
697         if (!zerocopy) {
698                 copylen = len;
699                 linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
700                 if (linear > good_linear)
701                         linear = good_linear;
702                 else if (linear < ETH_HLEN)
703                         linear = ETH_HLEN;
704         }
705
706         skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
707                             linear, noblock, &err);
708         if (!skb)
709                 goto err;
710
711         if (zerocopy)
712                 err = zerocopy_sg_from_iter(skb, from);
713         else
714                 err = skb_copy_datagram_from_iter(skb, 0, from, len);
715
716         if (err) {
717                 drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
718                 goto err_kfree;
719         }
720
721         skb_set_network_header(skb, ETH_HLEN);
722         skb_reset_mac_header(skb);
723         skb->protocol = eth_hdr(skb)->h_proto;
724
725         rcu_read_lock();
726         tap = rcu_dereference(q->tap);
727         if (!tap) {
728                 kfree_skb(skb);
729                 rcu_read_unlock();
730                 return total_len;
731         }
732         skb->dev = tap->dev;
733
734         if (vnet_hdr_len) {
735                 err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
736                                             tap_is_little_endian(q));
737                 if (err) {
738                         rcu_read_unlock();
739                         drop_reason = SKB_DROP_REASON_DEV_HDR;
740                         goto err_kfree;
741                 }
742         }
743
744         skb_probe_transport_header(skb);
745
746         /* Move network header to the right position for VLAN tagged packets */
747         if (eth_type_vlan(skb->protocol) &&
748             vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
749                 skb_set_network_header(skb, depth);
750
751         /* copy skb_ubuf_info for callback when skb has no error */
752         if (zerocopy) {
753                 skb_zcopy_init(skb, msg_control);
754         } else if (msg_control) {
755                 struct ubuf_info *uarg = msg_control;
756                 uarg->callback(NULL, uarg, false);
757         }
758
759         dev_queue_xmit(skb);
760         rcu_read_unlock();
761         return total_len;
762
763 err_kfree:
764         kfree_skb_reason(skb, drop_reason);
765
766 err:
767         rcu_read_lock();
768         tap = rcu_dereference(q->tap);
769         if (tap && tap->count_tx_dropped)
770                 tap->count_tx_dropped(tap);
771         rcu_read_unlock();
772
773         return err;
774 }
775
776 static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
777 {
778         struct file *file = iocb->ki_filp;
779         struct tap_queue *q = file->private_data;
780         int noblock = 0;
781
782         if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
783                 noblock = 1;
784
785         return tap_get_user(q, NULL, from, noblock);
786 }
787
788 /* Put packet to the user space buffer */
789 static ssize_t tap_put_user(struct tap_queue *q,
790                             const struct sk_buff *skb,
791                             struct iov_iter *iter)
792 {
793         int ret;
794         int vnet_hdr_len = 0;
795         int vlan_offset = 0;
796         int total;
797
798         if (q->flags & IFF_VNET_HDR) {
799                 int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;
800                 struct virtio_net_hdr vnet_hdr;
801
802                 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
803                 if (iov_iter_count(iter) < vnet_hdr_len)
804                         return -EINVAL;
805
806                 if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
807                                             tap_is_little_endian(q), true,
808                                             vlan_hlen))
809                         BUG();
810
811                 if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
812                     sizeof(vnet_hdr))
813                         return -EFAULT;
814
815                 iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
816         }
817         total = vnet_hdr_len;
818         total += skb->len;
819
820         if (skb_vlan_tag_present(skb)) {
821                 struct {
822                         __be16 h_vlan_proto;
823                         __be16 h_vlan_TCI;
824                 } veth;
825                 veth.h_vlan_proto = skb->vlan_proto;
826                 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
827
828                 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
829                 total += VLAN_HLEN;
830
831                 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
832                 if (ret || !iov_iter_count(iter))
833                         goto done;
834
835                 ret = copy_to_iter(&veth, sizeof(veth), iter);
836                 if (ret != sizeof(veth) || !iov_iter_count(iter))
837                         goto done;
838         }
839
840         ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
841                                      skb->len - vlan_offset);
842
843 done:
844         return ret ? ret : total;
845 }
846
847 static ssize_t tap_do_read(struct tap_queue *q,
848                            struct iov_iter *to,
849                            int noblock, struct sk_buff *skb)
850 {
851         DEFINE_WAIT(wait);
852         ssize_t ret = 0;
853
854         if (!iov_iter_count(to)) {
855                 kfree_skb(skb);
856                 return 0;
857         }
858
859         if (skb)
860                 goto put;
861
862         while (1) {
863                 if (!noblock)
864                         prepare_to_wait(sk_sleep(&q->sk), &wait,
865                                         TASK_INTERRUPTIBLE);
866
867                 /* Read frames from the queue */
868                 skb = ptr_ring_consume(&q->ring);
869                 if (skb)
870                         break;
871                 if (noblock) {
872                         ret = -EAGAIN;
873                         break;
874                 }
875                 if (signal_pending(current)) {
876                         ret = -ERESTARTSYS;
877                         break;
878                 }
879                 /* Nothing to read, let's sleep */
880                 schedule();
881         }
882         if (!noblock)
883                 finish_wait(sk_sleep(&q->sk), &wait);
884
885 put:
886         if (skb) {
887                 ret = tap_put_user(q, skb, to);
888                 if (unlikely(ret < 0))
889                         kfree_skb(skb);
890                 else
891                         consume_skb(skb);
892         }
893         return ret;
894 }
895
896 static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
897 {
898         struct file *file = iocb->ki_filp;
899         struct tap_queue *q = file->private_data;
900         ssize_t len = iov_iter_count(to), ret;
901         int noblock = 0;
902
903         if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
904                 noblock = 1;
905
906         ret = tap_do_read(q, to, noblock, NULL);
907         ret = min_t(ssize_t, ret, len);
908         if (ret > 0)
909                 iocb->ki_pos = ret;
910         return ret;
911 }
912
913 static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
914 {
915         struct tap_dev *tap;
916
917         ASSERT_RTNL();
918         tap = rtnl_dereference(q->tap);
919         if (tap)
920                 dev_hold(tap->dev);
921
922         return tap;
923 }
924
925 static void tap_put_tap_dev(struct tap_dev *tap)
926 {
927         dev_put(tap->dev);
928 }
929
930 static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
931 {
932         struct tap_queue *q = file->private_data;
933         struct tap_dev *tap;
934         int ret;
935
936         tap = tap_get_tap_dev(q);
937         if (!tap)
938                 return -EINVAL;
939
940         if (flags & IFF_ATTACH_QUEUE)
941                 ret = tap_enable_queue(tap, file, q);
942         else if (flags & IFF_DETACH_QUEUE)
943                 ret = tap_disable_queue(q);
944         else
945                 ret = -EINVAL;
946
947         tap_put_tap_dev(tap);
948         return ret;
949 }
950
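/* Translate the TUN_F_* bits requested via TUNSETOFFLOAD into NETIF_F_*
 * features: tap_features records which offloads user space is willing
 * to receive, while GRO/LRO on the lower device is toggled to match.
 */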
951 static int set_offload(struct tap_queue *q, unsigned long arg)
952 {
953         struct tap_dev *tap;
954         netdev_features_t features;
955         netdev_features_t feature_mask = 0;
956
957         tap = rtnl_dereference(q->tap);
958         if (!tap)
959                 return -ENOLINK;
960
961         features = tap->dev->features;
962
963         if (arg & TUN_F_CSUM) {
964                 feature_mask = NETIF_F_HW_CSUM;
965
966                 if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
967                         if (arg & TUN_F_TSO_ECN)
968                                 feature_mask |= NETIF_F_TSO_ECN;
969                         if (arg & TUN_F_TSO4)
970                                 feature_mask |= NETIF_F_TSO;
971                         if (arg & TUN_F_TSO6)
972                                 feature_mask |= NETIF_F_TSO6;
973                 }
974
975                 /* TODO: for now USO4 and USO6 should work simultaneously */
976                 if ((arg & (TUN_F_USO4 | TUN_F_USO6)) == (TUN_F_USO4 | TUN_F_USO6))
977                         feature_mask |= NETIF_F_GSO_UDP_L4;
978         }
979
980         /* The tun/tap driver inverts the usage of the TSO feature bits:
981          * setting a TSO bit means that user space wants to accept TSO
982          * frames, and clearing it means that user space does not
983          * support TSO.
984          * Tap follows the same convention.
985          * When user space turns off TSO, we turn off GSO/LRO so that
986          * user space will not receive TSO frames.
987          */
988         if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6) ||
989             (feature_mask & NETIF_F_GSO_UDP_L4))
990                 features |= RX_OFFLOADS;
991         else
992                 features &= ~RX_OFFLOADS;
993
994         /* tap_features are the same as features on tun/tap and
995          * reflect user expectations.
996          */
997         tap->tap_features = feature_mask;
998         if (tap->update_features)
999                 tap->update_features(tap, features);
1000
1001         return 0;
1002 }
1003
1004 /*
1005  * Provide compatibility with the generic tun/tap ioctl interface.
1006  */
1007 static long tap_ioctl(struct file *file, unsigned int cmd,
1008                       unsigned long arg)
1009 {
1010         struct tap_queue *q = file->private_data;
1011         struct tap_dev *tap;
1012         void __user *argp = (void __user *)arg;
1013         struct ifreq __user *ifr = argp;
1014         unsigned int __user *up = argp;
1015         unsigned short u;
1016         int __user *sp = argp;
1017         struct sockaddr sa;
1018         int s;
1019         int ret;
1020
1021         switch (cmd) {
1022         case TUNSETIFF:
1023                 /* ignore the name, just look at flags */
1024                 if (get_user(u, &ifr->ifr_flags))
1025                         return -EFAULT;
1026
1027                 ret = 0;
1028                 if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
1029                         ret = -EINVAL;
1030                 else
1031                         q->flags = (q->flags & ~TAP_IFFEATURES) | u;
1032
1033                 return ret;
1034
1035         case TUNGETIFF:
1036                 rtnl_lock();
1037                 tap = tap_get_tap_dev(q);
1038                 if (!tap) {
1039                         rtnl_unlock();
1040                         return -ENOLINK;
1041                 }
1042
1043                 ret = 0;
1044                 u = q->flags;
1045                 if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
1046                     put_user(u, &ifr->ifr_flags))
1047                         ret = -EFAULT;
1048                 tap_put_tap_dev(tap);
1049                 rtnl_unlock();
1050                 return ret;
1051
1052         case TUNSETQUEUE:
1053                 if (get_user(u, &ifr->ifr_flags))
1054                         return -EFAULT;
1055                 rtnl_lock();
1056                 ret = tap_ioctl_set_queue(file, u);
1057                 rtnl_unlock();
1058                 return ret;
1059
1060         case TUNGETFEATURES:
1061                 if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
1062                         return -EFAULT;
1063                 return 0;
1064
1065         case TUNSETSNDBUF:
1066                 if (get_user(s, sp))
1067                         return -EFAULT;
1068                 if (s <= 0)
1069                         return -EINVAL;
1070
1071                 q->sk.sk_sndbuf = s;
1072                 return 0;
1073
1074         case TUNGETVNETHDRSZ:
1075                 s = q->vnet_hdr_sz;
1076                 if (put_user(s, sp))
1077                         return -EFAULT;
1078                 return 0;
1079
1080         case TUNSETVNETHDRSZ:
1081                 if (get_user(s, sp))
1082                         return -EFAULT;
1083                 if (s < (int)sizeof(struct virtio_net_hdr))
1084                         return -EINVAL;
1085
1086                 q->vnet_hdr_sz = s;
1087                 return 0;
1088
1089         case TUNGETVNETLE:
1090                 s = !!(q->flags & TAP_VNET_LE);
1091                 if (put_user(s, sp))
1092                         return -EFAULT;
1093                 return 0;
1094
1095         case TUNSETVNETLE:
1096                 if (get_user(s, sp))
1097                         return -EFAULT;
1098                 if (s)
1099                         q->flags |= TAP_VNET_LE;
1100                 else
1101                         q->flags &= ~TAP_VNET_LE;
1102                 return 0;
1103
1104         case TUNGETVNETBE:
1105                 return tap_get_vnet_be(q, sp);
1106
1107         case TUNSETVNETBE:
1108                 return tap_set_vnet_be(q, sp);
1109
1110         case TUNSETOFFLOAD:
1111                 /* let the user check for future flags */
1112                 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1113                             TUN_F_TSO_ECN | TUN_F_UFO |
1114                             TUN_F_USO4 | TUN_F_USO6))
1115                         return -EINVAL;
1116
1117                 rtnl_lock();
1118                 ret = set_offload(q, arg);
1119                 rtnl_unlock();
1120                 return ret;
1121
1122         case SIOCGIFHWADDR:
1123                 rtnl_lock();
1124                 tap = tap_get_tap_dev(q);
1125                 if (!tap) {
1126                         rtnl_unlock();
1127                         return -ENOLINK;
1128                 }
1129                 ret = 0;
1130                 dev_get_mac_address(&sa, dev_net(tap->dev), tap->dev->name);
1131                 if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
1132                     copy_to_user(&ifr->ifr_hwaddr, &sa, sizeof(sa)))
1133                         ret = -EFAULT;
1134                 tap_put_tap_dev(tap);
1135                 rtnl_unlock();
1136                 return ret;
1137
1138         case SIOCSIFHWADDR:
1139                 if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
1140                         return -EFAULT;
1141                 rtnl_lock();
1142                 tap = tap_get_tap_dev(q);
1143                 if (!tap) {
1144                         rtnl_unlock();
1145                         return -ENOLINK;
1146                 }
1147                 ret = dev_set_mac_address_user(tap->dev, &sa, NULL);
1148                 tap_put_tap_dev(tap);
1149                 rtnl_unlock();
1150                 return ret;
1151
1152         default:
1153                 return -EINVAL;
1154         }
1155 }
1156
1157 static const struct file_operations tap_fops = {
1158         .owner          = THIS_MODULE,
1159         .open           = tap_open,
1160         .release        = tap_release,
1161         .read_iter      = tap_read_iter,
1162         .write_iter     = tap_write_iter,
1163         .poll           = tap_poll,
1164         .llseek         = no_llseek,
1165         .unlocked_ioctl = tap_ioctl,
1166         .compat_ioctl   = compat_ptr_ioctl,
1167 };
1168
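/* Transmit path for XDP buffers batched by vhost-net (TUN_MSG_PTR in
 * tap_sendmsg()): build the skb directly around the existing buffer
 * instead of copying the payload.
 */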
1169 static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
1170 {
1171         struct tun_xdp_hdr *hdr = xdp->data_hard_start;
1172         struct virtio_net_hdr *gso = &hdr->gso;
1173         int buflen = hdr->buflen;
1174         int vnet_hdr_len = 0;
1175         struct tap_dev *tap;
1176         struct sk_buff *skb;
1177         int err, depth;
1178
1179         if (q->flags & IFF_VNET_HDR)
1180                 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
1181
1182         skb = build_skb(xdp->data_hard_start, buflen);
1183         if (!skb) {
1184                 err = -ENOMEM;
1185                 goto err;
1186         }
1187
1188         skb_reserve(skb, xdp->data - xdp->data_hard_start);
1189         skb_put(skb, xdp->data_end - xdp->data);
1190
1191         skb_set_network_header(skb, ETH_HLEN);
1192         skb_reset_mac_header(skb);
1193         skb->protocol = eth_hdr(skb)->h_proto;
1194
1195         if (vnet_hdr_len) {
1196                 err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
1197                 if (err)
1198                         goto err_kfree;
1199         }
1200
1201         /* Move network header to the right position for VLAN tagged packets */
1202         if (eth_type_vlan(skb->protocol) &&
1203             vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1204                 skb_set_network_header(skb, depth);
1205
1206         rcu_read_lock();
1207         tap = rcu_dereference(q->tap);
1208         if (tap) {
1209                 skb->dev = tap->dev;
1210                 skb_probe_transport_header(skb);
1211                 dev_queue_xmit(skb);
1212         } else {
1213                 kfree_skb(skb);
1214         }
1215         rcu_read_unlock();
1216
1217         return 0;
1218
1219 err_kfree:
1220         kfree_skb(skb);
1221 err:
1222         rcu_read_lock();
1223         tap = rcu_dereference(q->tap);
1224         if (tap && tap->count_tx_dropped)
1225                 tap->count_tx_dropped(tap);
1226         rcu_read_unlock();
1227         return err;
1228 }
1229
1230 static int tap_sendmsg(struct socket *sock, struct msghdr *m,
1231                        size_t total_len)
1232 {
1233         struct tap_queue *q = container_of(sock, struct tap_queue, sock);
1234         struct tun_msg_ctl *ctl = m->msg_control;
1235         struct xdp_buff *xdp;
1236         int i;
1237
1238         if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
1239             ctl && ctl->type == TUN_MSG_PTR) {
1240                 for (i = 0; i < ctl->num; i++) {
1241                         xdp = &((struct xdp_buff *)ctl->ptr)[i];
1242                         tap_get_user_xdp(q, xdp);
1243                 }
1244                 return 0;
1245         }
1246
1247         return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
1248                             m->msg_flags & MSG_DONTWAIT);
1249 }
1250
1251 static int tap_recvmsg(struct socket *sock, struct msghdr *m,
1252                        size_t total_len, int flags)
1253 {
1254         struct tap_queue *q = container_of(sock, struct tap_queue, sock);
1255         struct sk_buff *skb = m->msg_control;
1256         int ret;
1257         if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
1258                 kfree_skb(skb);
1259                 return -EINVAL;
1260         }
1261         ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
1262         if (ret > total_len) {
1263                 m->msg_flags |= MSG_TRUNC;
1264                 ret = flags & MSG_TRUNC ? ret : total_len;
1265         }
1266         return ret;
1267 }
1268
1269 static int tap_peek_len(struct socket *sock)
1270 {
1271         struct tap_queue *q = container_of(sock, struct tap_queue,
1272                                                sock);
1273         return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
1274 }
1275
1276 /* Ops structure to mimic raw sockets, as tun does */
1277 static const struct proto_ops tap_socket_ops = {
1278         .sendmsg = tap_sendmsg,
1279         .recvmsg = tap_recvmsg,
1280         .peek_len = tap_peek_len,
1281 };
1282
1283 /* Get an underlying socket object from a tap file.  Returns error unless file is
1284  * attached to a device.  The returned object works like a packet socket, it
1285  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
1286  * holding a reference to the file for as long as the socket is in use. */
1287 struct socket *tap_get_socket(struct file *file)
1288 {
1289         struct tap_queue *q;
1290         if (file->f_op != &tap_fops)
1291                 return ERR_PTR(-EINVAL);
1292         q = file->private_data;
1293         if (!q)
1294                 return ERR_PTR(-EBADFD);
1295         return &q->sock;
1296 }
1297 EXPORT_SYMBOL_GPL(tap_get_socket);
1298
1299 struct ptr_ring *tap_get_ptr_ring(struct file *file)
1300 {
1301         struct tap_queue *q;
1302
1303         if (file->f_op != &tap_fops)
1304                 return ERR_PTR(-EINVAL);
1305         q = file->private_data;
1306         if (!q)
1307                 return ERR_PTR(-EBADFD);
1308         return &q->ring;
1309 }
1310 EXPORT_SYMBOL_GPL(tap_get_ptr_ring);
1311
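/* Resize every queue's ring to the lower device's new tx_queue_len.
 * ptr_ring_resize_multiple() carries queued skbs over to the new rings;
 * any that no longer fit are freed via __skb_array_destroy_skb().
 */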
1312 int tap_queue_resize(struct tap_dev *tap)
1313 {
1314         struct net_device *dev = tap->dev;
1315         struct tap_queue *q;
1316         struct ptr_ring **rings;
1317         int n = tap->numqueues;
1318         int ret, i = 0;
1319
1320         rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
1321         if (!rings)
1322                 return -ENOMEM;
1323
1324         list_for_each_entry(q, &tap->queue_list, next)
1325                 rings[i++] = &q->ring;
1326
1327         ret = ptr_ring_resize_multiple(rings, n,
1328                                        dev->tx_queue_len, GFP_KERNEL,
1329                                        __skb_array_destroy_skb);
1330
1331         kfree(rings);
1332         return ret;
1333 }
1334 EXPORT_SYMBOL_GPL(tap_queue_resize);
1335
1336 static int tap_list_add(dev_t major, const char *device_name)
1337 {
1338         struct major_info *tap_major;
1339
1340         tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
1341         if (!tap_major)
1342                 return -ENOMEM;
1343
1344         tap_major->major = MAJOR(major);
1345
1346         idr_init(&tap_major->minor_idr);
1347         spin_lock_init(&tap_major->minor_lock);
1348
1349         tap_major->device_name = device_name;
1350
1351         list_add_tail_rcu(&tap_major->next, &major_list);
1352         return 0;
1353 }
1354
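/* Register a character device region of TAP_NUM_DEVS minors and record
 * the major in major_list, so that tap_open() can map an inode back to
 * its tap_dev via dev_get_by_tap_file().
 */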
1355 int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
1356                     const char *device_name, struct module *module)
1357 {
1358         int err;
1359
1360         err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
1361         if (err)
1362                 goto out1;
1363
1364         cdev_init(tap_cdev, &tap_fops);
1365         tap_cdev->owner = module;
1366         err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
1367         if (err)
1368                 goto out2;
1369
1370         err = tap_list_add(*tap_major, device_name);
1371         if (err)
1372                 goto out3;
1373
1374         return 0;
1375
1376 out3:
1377         cdev_del(tap_cdev);
1378 out2:
1379         unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
1380 out1:
1381         return err;
1382 }
1383 EXPORT_SYMBOL_GPL(tap_create_cdev);
1384
1385 void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
1386 {
1387         struct major_info *tap_major, *tmp;
1388
1389         cdev_del(tap_cdev);
1390         unregister_chrdev_region(major, TAP_NUM_DEVS);
1391         list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
1392                 if (tap_major->major == MAJOR(major)) {
1393                         idr_destroy(&tap_major->minor_idr);
1394                         list_del_rcu(&tap_major->next);
1395                         kfree_rcu(tap_major, rcu);
1396                 }
1397         }
1398 }
1399 EXPORT_SYMBOL_GPL(tap_destroy_cdev);
1400
1401 MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
1402 MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
1403 MODULE_LICENSE("GPL");