// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2019 Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 */
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <uapi/linux/filter.h>
#include <init.h>
#include <irq_kern.h>
#include <irq_user.h>
#include <net_kern.h>
#include <os.h>
#include "mconsole_kern.h"
#include "vector_user.h"
#include "vector_kern.h"
/*
 * Adapted from network devices with the following major changes:
 * All transports are static - simplifies the code significantly
 * Multiple FDs/IRQs per device
 * Vector IO optionally used for read/write, falling back to legacy
 * based on configuration and/or availability
 * Configuration is no longer positional - L2TPv3 and GRE require up to
 * 10 parameters, passing this as positional is not fit for purpose.
 * Only socket transports are supported
 */
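
/*
 * Illustrative configuration examples (names, MACs and option values below
 * are made up; the exact option set depends on the transport and is parsed
 * in vector_user.c). The syntax follows the __uml_help() text at the bottom
 * of this file, either on the kernel command line or via mconsole "config":
 *
 *   vec0:transport=tap,ifname=vectap0,depth=128,gro=1,mac=52:54:00:12:34:56
 *   vec1:transport=raw,ifname=p1p1,depth=256,vec=1
 */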
#define DRIVER_NAME "uml-vector"

struct vector_cmd_line_arg {
        struct list_head list;
        int unit;
        char *arguments;
};

struct vector_device {
        struct list_head list;
        struct net_device *dev;
        struct platform_device pdev;
        int unit;
        int opened;
};

static LIST_HEAD(vec_cmd_line);

static DEFINE_SPINLOCK(vector_devices_lock);
static LIST_HEAD(vector_devices);

static int driver_registered;

static void vector_eth_configure(int n, struct arglist *def);
/* Argument accessors to set variables (and/or set default values)
 * mtu, buffer sizing, default headroom, etc
 */

#define DEFAULT_HEADROOM 2
#define SAFETY_MARGIN 32
#define DEFAULT_VECTOR_SIZE 64
#define TX_SMALL_PACKET 128
#define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
#define MAX_ITERATIONS 64
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_queue_max" },
        { "rx_queue_running_average" },
        { "tx_queue_max" },
        { "tx_queue_running_average" },
        { "rx_encaps_errors" },
        { "tx_timeout_count" },
        { "tx_restart_queue" },
        { "tx_kicks" },
        { "tx_flow_control_xon" },
        { "tx_flow_control_xoff" },
        { "rx_csum_offload_good" },
        { "rx_csum_offload_errors" },
        { "sg_ok" },
        { "sg_linearized" },
};

#define VECTOR_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
static void vector_reset_stats(struct vector_private *vp)
{
        vp->estats.rx_queue_max = 0;
        vp->estats.rx_queue_running_average = 0;
        vp->estats.tx_queue_max = 0;
        vp->estats.tx_queue_running_average = 0;
        vp->estats.rx_encaps_errors = 0;
        vp->estats.tx_timeout_count = 0;
        vp->estats.tx_restart_queue = 0;
        vp->estats.tx_kicks = 0;
        vp->estats.tx_flow_control_xon = 0;
        vp->estats.tx_flow_control_xoff = 0;
        vp->estats.sg_ok = 0;
        vp->estats.sg_linearized = 0;
}
static int get_mtu(struct arglist *def)
        char *mtu = uml_vector_fetch_arg(def, "mtu");
                if (kstrtoul(mtu, 10, &result) == 0)
                        if ((result < (1 << 16) - 1) && (result >= 576))
        return ETH_MAX_PACKET;

static char *get_bpf_file(struct arglist *def)
        return uml_vector_fetch_arg(def, "bpffile");

static bool get_bpf_flash(struct arglist *def)
        char *allow = uml_vector_fetch_arg(def, "bpfflash");
                if (kstrtoul(allow, 10, &result) == 0)

static int get_depth(struct arglist *def)
        char *mtu = uml_vector_fetch_arg(def, "depth");
                if (kstrtoul(mtu, 10, &result) == 0)
        return DEFAULT_VECTOR_SIZE;

static int get_headroom(struct arglist *def)
        char *mtu = uml_vector_fetch_arg(def, "headroom");
                if (kstrtoul(mtu, 10, &result) == 0)
        return DEFAULT_HEADROOM;

static int get_req_size(struct arglist *def)
        char *gro = uml_vector_fetch_arg(def, "gro");
                if (kstrtoul(gro, 10, &result) == 0) {
        return get_mtu(def) + ETH_HEADER_OTHER +
                get_headroom(def) + SAFETY_MARGIN;

static int get_transport_options(struct arglist *def)
        char *transport = uml_vector_fetch_arg(def, "transport");
        char *vector = uml_vector_fetch_arg(def, "vec");

        int vec_rx = VECTOR_RX;
        int vec_tx = VECTOR_TX;

        if (transport == NULL)
        if (vector != NULL) {
                if (kstrtoul(vector, 10, &parsed) == 0) {
        if (get_bpf_flash(def))
                result = VECTOR_BPF_FLASH;
        if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
        if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
                return (result | vec_rx | VECTOR_BPF);
        if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
                return (result | vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
        return (result | vec_rx | vec_tx);
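
/*
 * How the flags combine, as far as this function shows: the "vec" argument
 * selects whether vector IO is used at all, "bpfflash=1" adds
 * VECTOR_BPF_FLASH so a filter can later be loaded through the ethtool
 * flash interface, hybrid transports get vector RX plus a default BPF
 * filter (VECTOR_BPF), and raw transports additionally request qdisc
 * bypass on the host socket. For example, transport=raw with bpfflash=1
 * resolves to VECTOR_BPF_FLASH | VECTOR_RX | VECTOR_TX | VECTOR_QDISC_BYPASS.
 */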
/* A mini-buffer for packet drop read
 * All of our supported transports are datagram oriented and we always
 * read using recvmsg or recvmmsg. If we pass a buffer which is smaller
 * than the packet size it still counts as a full packet read and will
 * clean the incoming stream to keep sigio/epoll happy.
 */

#define DROP_BUFFER_SIZE 32

static char *drop_buffer;

/* Array backed queues optimized for bulk enqueue/dequeue and
 * 1:N (small values of N) or 1:1 enqueuer/dequeuer ratios.
 * For more details and full design rationale see
 * http://foswiki.cambridgegreys.com/Main/EatYourTailAndEnjoyIt
 */
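
/*
 * Rough semantics (a sketch, not a spec): enqueuers append at "tail" under
 * tail_lock, dequeuers consume from "head" under head_lock, and queue_depth
 * is the shared fill level. Whenever the depth drops to zero, head and tail
 * are reset so the next burst can use one maximal contiguous vector.
 * Worked example with max_depth = 64: if head = 60 and queue_depth = 10,
 * vector_send() below first transmits the 4 entries up to the end of the
 * array and the remaining 6 on a further pass once the head has advanced.
 */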
/*
 * Advance the mmsg queue head by n = advance. Resets the queue to
 * maximum enqueue/dequeue-at-once capacity if possible. Called by
 * dequeuers. Caller must hold the head_lock!
 */

static int vector_advancehead(struct vector_queue *qi, int advance)
        spin_lock(&qi->tail_lock);
        qi->queue_depth -= advance;

        /* we are at 0, use this to
         * reset head and tail so we can use max size vectors
         */
        if (qi->queue_depth == 0) {
        queue_depth = qi->queue_depth;
        spin_unlock(&qi->tail_lock);

/* Advance the queue tail by n = advance.
 * This is called by enqueuers which should hold the
 */

static int vector_advancetail(struct vector_queue *qi, int advance)
        spin_lock(&qi->head_lock);
        qi->queue_depth += advance;
        queue_depth = qi->queue_depth;
        spin_unlock(&qi->head_lock);
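
/*
 * Locking convention for the two helpers above (summary): enqueuers run
 * with tail_lock held and only take head_lock briefly to update the shared
 * queue_depth; dequeuers do the mirror image with head_lock held and a
 * short tail_lock critical section. head and tail themselves are only
 * written by their respective owners, so the cross-lock is needed just for
 * the depth accounting.
 */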
static int prep_msg(struct vector_private *vp,
        skb_frag_t *skb_frag;

        nr_frags = skb_shinfo(skb)->nr_frags;
        if (nr_frags > MAX_IOV_SIZE) {
                if (skb_linearize(skb) != 0)
        if (vp->header_size > 0) {
                iov[iov_index].iov_len = vp->header_size;
                vp->form_header(iov[iov_index].iov_base, skb, vp);
        iov[iov_index].iov_base = skb->data;
                iov[iov_index].iov_len = skb->len - skb->data_len;
                iov[iov_index].iov_len = skb->len;
        for (frag = 0; frag < nr_frags; frag++) {
                skb_frag = &skb_shinfo(skb)->frags[frag];
                iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
                iov[iov_index].iov_len = skb_frag_size(skb_frag);
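
/*
 * Layout produced by prep_msg() in the usual case: an optional transport
 * header slot first, then one entry covering the linear part of the skb,
 * then one entry per page fragment. MAX_IOV_SIZE above is defined as
 * MAX_SKB_FRAGS + 1, so a non-linear skb with more fragments than that is
 * linearized instead of overflowing the caller's iovec.
 */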
/*
 * Generic vector enqueue with support for forming headers using transport
 * specific callback. Allows GRE, L2TPv3, RAW and other transports
 * to use a common enqueue procedure in vector mode
 */

static int vector_enqueue(struct vector_queue *qi, struct sk_buff *skb)
        struct vector_private *vp = netdev_priv(qi->dev);
        struct mmsghdr *mmsg_vector = qi->mmsg_vector;

        spin_lock(&qi->tail_lock);
        spin_lock(&qi->head_lock);
        queue_depth = qi->queue_depth;
        spin_unlock(&qi->head_lock);

        packet_len = skb->len;

        if (queue_depth < qi->max_depth) {
                *(qi->skbuff_vector + qi->tail) = skb;
                mmsg_vector += qi->tail;
                iov_count = prep_msg(vp, skb, mmsg_vector->msg_hdr.msg_iov);
                mmsg_vector->msg_hdr.msg_iovlen = iov_count;
                mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr;
                mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size;
                queue_depth = vector_advancetail(qi, 1);
        spin_unlock(&qi->tail_lock);

        qi->dev->stats.tx_dropped++;
        packet_len = skb->len;
        dev_consume_skb_any(skb);
        netdev_completed_queue(qi->dev, 1, packet_len);
        spin_unlock(&qi->tail_lock);

static int consume_vector_skbs(struct vector_queue *qi, int count)
        for (skb_index = qi->head; skb_index < qi->head + count; skb_index++) {
                skb = *(qi->skbuff_vector + skb_index);
                /* mark as empty to ensure correct destruction if
                 * needed
                 */
                bytes_compl += skb->len;
                *(qi->skbuff_vector + skb_index) = NULL;
                dev_consume_skb_any(skb);
        qi->dev->stats.tx_bytes += bytes_compl;
        qi->dev->stats.tx_packets += count;
        netdev_completed_queue(qi->dev, count, bytes_compl);
        return vector_advancehead(qi, count);

/*
 * Generic vector dequeue via sendmmsg with support for forming headers
 * using transport specific callback. Allows GRE, L2TPv3, RAW and
 * other transports to use a common dequeue procedure in vector mode
 */

static int vector_send(struct vector_queue *qi)
        struct vector_private *vp = netdev_priv(qi->dev);
        struct mmsghdr *send_from;
        int result = 0, send_len, queue_depth = qi->max_depth;

        if (spin_trylock(&qi->head_lock)) {
                if (spin_trylock(&qi->tail_lock)) {
                        /* update queue_depth to current value */
                        queue_depth = qi->queue_depth;
                        spin_unlock(&qi->tail_lock);
                        while (queue_depth > 0) {
                                /* Calculate the start of the vector */
                                send_len = queue_depth;
                                send_from = qi->mmsg_vector;
                                send_from += qi->head;
                                /* Adjust vector size if wraparound */
                                if (send_len + qi->head > qi->max_depth)
                                        send_len = qi->max_depth - qi->head;
                                /* Try to TX as many packets as possible */
                                result = uml_vector_sendmmsg(
                                        (result != send_len);
                                /* For some of the sendmmsg error scenarios
                                 * we may end up being unsure of the TX success
                                 * for all packets. It is safer to declare
                                 * them all TX-ed and blame the network.
                                 */
                                        netdev_err(vp->dev, "sendmmsg err=%i\n",
                                        consume_vector_skbs(qi, result);
                                /* This is equivalent to a TX IRQ.
                                 * Restart the upper layers to feed us
                                 * more packets.
                                 */
                                if (result > vp->estats.tx_queue_max)
                                        vp->estats.tx_queue_max = result;
                                vp->estats.tx_queue_running_average =
                                        (vp->estats.tx_queue_running_average + result) >> 1;
                                netif_trans_update(qi->dev);
                                netif_wake_queue(qi->dev);
                                /* if TX is busy, break out of the send loop,
                                 * poll write IRQ will reschedule xmit for us
                                 */
                                if (result != send_len) {
                                        vp->estats.tx_restart_queue++;
                spin_unlock(&qi->head_lock);
                tasklet_schedule(&vp->tx_poll);
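
/*
 * Note on the trylock pattern above: vector_send() is called both from the
 * xmit path and from the tx_poll tasklet, so instead of spinning it simply
 * gives up when another sender already holds head_lock and lets the tasklet
 * retry. The tx_queue_running_average counter is a cheap half-life filter:
 * new_avg = (old_avg + batch) / 2.
 */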
/* Queue destructor. Deliberately stateless so we can use
 * it in queue cleanup if initialization fails.
 */

static void destroy_queue(struct vector_queue *qi)
        struct vector_private *vp = netdev_priv(qi->dev);
        struct mmsghdr *mmsg_vector;

        /* deallocate any skbuffs - we rely on any unused to be
         * set to NULL
         */
        if (qi->skbuff_vector != NULL) {
                for (i = 0; i < qi->max_depth; i++) {
                        if (*(qi->skbuff_vector + i) != NULL)
                                dev_kfree_skb_any(*(qi->skbuff_vector + i));
                kfree(qi->skbuff_vector);
        /* deallocate matching IOV structures including header buffs */
        if (qi->mmsg_vector != NULL) {
                mmsg_vector = qi->mmsg_vector;
                for (i = 0; i < qi->max_depth; i++) {
                        iov = mmsg_vector->msg_hdr.msg_iov;
                        if ((vp->header_size > 0) &&
                            (iov->iov_base != NULL))
                                kfree(iov->iov_base);
                kfree(qi->mmsg_vector);

/*
 * Queue constructor. Create a queue with a given size.
 */
static struct vector_queue *create_queue(
        struct vector_private *vp,
        int max_size,
        int header_size,
        int num_extra_frags)
        struct vector_queue *result;
        struct mmsghdr *mmsg_vector;

        result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
        result->max_depth = max_size;
        result->dev = vp->dev;
        result->mmsg_vector = kmalloc(
                (sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
        if (result->mmsg_vector == NULL)
        result->skbuff_vector = kmalloc(
                (sizeof(void *) * max_size), GFP_KERNEL);
        if (result->skbuff_vector == NULL)

        /* further failures can be handled safely by destroy_queue */

        mmsg_vector = result->mmsg_vector;
        for (i = 0; i < max_size; i++) {
                /* Clear all pointers - we use non-NULL as marking on
                 * what to free on destruction
                 */
                *(result->skbuff_vector + i) = NULL;
                mmsg_vector->msg_hdr.msg_iov = NULL;
        mmsg_vector = result->mmsg_vector;
        result->max_iov_frags = num_extra_frags;
        for (i = 0; i < max_size; i++) {
                if (vp->header_size > 0)
                        iov = kmalloc_array(3 + num_extra_frags,
                                            sizeof(struct iovec),
                else
                        iov = kmalloc_array(2 + num_extra_frags,
                                            sizeof(struct iovec),
                mmsg_vector->msg_hdr.msg_iov = iov;
                mmsg_vector->msg_hdr.msg_iovlen = 1;
                mmsg_vector->msg_hdr.msg_control = NULL;
                mmsg_vector->msg_hdr.msg_controllen = 0;
                mmsg_vector->msg_hdr.msg_flags = MSG_DONTWAIT;
                mmsg_vector->msg_hdr.msg_name = NULL;
                mmsg_vector->msg_hdr.msg_namelen = 0;
                if (vp->header_size > 0) {
                        iov->iov_base = kmalloc(header_size, GFP_KERNEL);
                        if (iov->iov_base == NULL)
                        iov->iov_len = header_size;
                        mmsg_vector->msg_hdr.msg_iovlen = 2;
                iov->iov_base = NULL;
        spin_lock_init(&result->head_lock);
        spin_lock_init(&result->tail_lock);
        result->queue_depth = 0;

        kfree(result->mmsg_vector);

        destroy_queue(result);
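
/*
 * Sizing note for create_queue(): each slot gets an iovec array of
 * 3 + num_extra_frags entries when a transport header is in use (header,
 * linear data, fragments) and 2 + num_extra_frags entries otherwise, with
 * msg_iovlen starting at 2 or 1 accordingly. prep_msg()/prep_skb() later
 * rewrite the data entries per packet, while the header buffer allocated
 * here stays owned by the queue until destroy_queue() frees it.
 */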
/*
 * We do not use the RX queue as a proper wraparound queue for now.
 * This is not necessary because the consumption via netif_rx()
 * happens in-line. While we can try using the return code of
 * netif_rx() for flow control there are no drivers doing this today.
 * For this RX specific use we ignore the tail/head locks and
 * just read into a prepared queue filled with skbuffs.
 */

static struct sk_buff *prep_skb(
        struct vector_private *vp,
        struct user_msghdr *msg)
        int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN;
        struct sk_buff *result;
        int iov_index = 0, len;
        struct iovec *iov = msg->msg_iov;
        int err, nr_frags, frag;
        skb_frag_t *skb_frag;

        if (vp->req_size <= linear)
        result = alloc_skb_with_frags(
                len - vp->max_packet,
        if (vp->header_size > 0)
        if (result == NULL) {
                iov[iov_index].iov_base = NULL;
                iov[iov_index].iov_len = 0;
        skb_reserve(result, vp->headroom);
        result->dev = vp->dev;
        skb_put(result, vp->max_packet);
        result->data_len = len - vp->max_packet;
        result->len += len - vp->max_packet;
        skb_reset_mac_header(result);
        result->ip_summed = CHECKSUM_NONE;
        iov[iov_index].iov_base = result->data;
        iov[iov_index].iov_len = vp->max_packet;

        nr_frags = skb_shinfo(result)->nr_frags;
        for (frag = 0; frag < nr_frags; frag++) {
                skb_frag = &skb_shinfo(result)->frags[frag];
                iov[iov_index].iov_base = skb_frag_address_safe(skb_frag);
                if (iov[iov_index].iov_base != NULL)
                        iov[iov_index].iov_len = skb_frag_size(skb_frag);
                else
                        iov[iov_index].iov_len = 0;

        msg->msg_iovlen = iov_index;

/* Prepare queue for recvmmsg one-shot rx - fill with fresh sk_buffs */

static void prep_queue_for_rx(struct vector_queue *qi)
        struct vector_private *vp = netdev_priv(qi->dev);
        struct mmsghdr *mmsg_vector = qi->mmsg_vector;
        void **skbuff_vector = qi->skbuff_vector;

        if (qi->queue_depth == 0)
        for (i = 0; i < qi->queue_depth; i++) {
                /* it is OK if allocation fails - recvmmsg with NULL data in
                 * iov argument still performs an RX, just drops the packet.
                 * This allows us to stop faffing around with a "drop buffer"
                 */
                *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr);
static struct vector_device *find_device(int n)
        struct vector_device *device;
        struct list_head *ele;

        spin_lock(&vector_devices_lock);
        list_for_each(ele, &vector_devices) {
                device = list_entry(ele, struct vector_device, list);
                if (device->unit == n)
        spin_unlock(&vector_devices_lock);

static int vector_parse(char *str, int *index_out, char **str_out,
        while ((*str != ':') && (strlen(str) > 1))
                *error_out = "Expected ':' after device number";
        err = kstrtouint(start, 0, &n);
                *error_out = "Bad device number";
        if (find_device(n)) {
                *error_out = "Device already configured";

static int vector_config(char *str, char **error_out)
        struct arglist *parsed;

        err = vector_parse(str, &n, &params, error_out);

        /* This string is broken up and the pieces used by the underlying
         * driver. We should copy it to make sure things do not go wrong
         */
        params = kstrdup(params, GFP_KERNEL);
        if (params == NULL) {
                *error_out = "vector_config failed to strdup string";
        parsed = uml_parse_vector_ifspec(params);

        if (parsed == NULL) {
                *error_out = "vector_config failed to parse parameters";
        vector_eth_configure(n, parsed);

static int vector_id(char **str, int *start_out, int *end_out)
        n = simple_strtoul(*str, &end, 0);
        if ((*end != '\0') || (end == *str))

static int vector_remove(int n, char **error_out)
        struct vector_device *vec_d;
        struct net_device *dev;
        struct vector_private *vp;

        vec_d = find_device(n);
        vp = netdev_priv(dev);
        unregister_netdev(dev);
        platform_device_unregister(&vec_d->pdev);

/*
 * There is no shared per-transport initialization code, so
 * we will just initialize each interface one by one and
 */

static struct platform_driver uml_net_driver = {

static void vector_device_release(struct device *dev)
        struct vector_device *device = dev_get_drvdata(dev);
        struct net_device *netdev = device->dev;

        list_del(&device->list);
/* Bog standard recv using recvmsg - not used normally unless the user
 * explicitly specifies not to use recvmmsg vector RX.
 */

static int vector_legacy_rx(struct vector_private *vp)
        struct user_msghdr hdr;
        struct iovec iov[2 + MAX_IOV_SIZE]; /* header + data use case only */

        hdr.msg_iov = (struct iovec *) &iov;
        hdr.msg_control = NULL;
        hdr.msg_controllen = 0;

        if (vp->header_size > 0) {
                iov[0].iov_base = vp->header_rxbuffer;
                iov[0].iov_len = vp->header_size;

        skb = prep_skb(vp, &hdr);
                /* Read a packet into drop_buffer and don't do
                 */
                iov[iovpos].iov_base = drop_buffer;
                iov[iovpos].iov_len = DROP_BUFFER_SIZE;
                vp->dev->stats.rx_dropped++;

        pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0);

        if (pkt_len > vp->header_size) {
                if (vp->header_size > 0) {
                        header_check = vp->verify_header(
                                vp->header_rxbuffer, skb, vp);
                        if (header_check < 0) {
                                dev_kfree_skb_irq(skb);
                                vp->dev->stats.rx_dropped++;
                                vp->estats.rx_encaps_errors++;
                        if (header_check > 0) {
                                vp->estats.rx_csum_offload_good++;
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                pskb_trim(skb, pkt_len - vp->rx_header_size);
                skb->protocol = eth_type_trans(skb, skb->dev);
                vp->dev->stats.rx_bytes += skb->len;
                vp->dev->stats.rx_packets++;
        dev_kfree_skb_irq(skb);

/*
 * Packet at a time TX which falls back to vector TX if the
 * underlying transport is busy.
 */

static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
        struct iovec iov[3 + MAX_IOV_SIZE];
        int iov_count, pkt_len = 0;

        iov[0].iov_base = vp->header_txbuffer;
        iov_count = prep_msg(vp, skb, (struct iovec *) &iov);

        pkt_len = uml_vector_writev(
                (struct iovec *) &iov,

        netif_trans_update(vp->dev);
        netif_wake_queue(vp->dev);

        vp->dev->stats.tx_bytes += skb->len;
        vp->dev->stats.tx_packets++;

        vp->dev->stats.tx_dropped++;

        vp->dev->stats.tx_dropped++;
/*
 * Receive as many messages as we can in one call using the special
 * mmsg vector matched to an skb vector which we prepared earlier.
 */

static int vector_mmsg_rx(struct vector_private *vp)
        struct vector_queue *qi = vp->rx_queue;
        struct mmsghdr *mmsg_vector = qi->mmsg_vector;
        void **skbuff_vector = qi->skbuff_vector;

        /* Refresh the vector and make sure it is with new skbs and the
         * iovs are updated to point to them.
         */
        prep_queue_for_rx(qi);

        /* Fire the Lazy Gun - get as many packets as we can in one go. */

        packet_count = uml_vector_recvmmsg(
                vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);

        if (packet_count < 0)

        if (packet_count <= 0)

        /* We treat packet processing as enqueue, buffer refresh as dequeue.
         * The queue_depth tells us how many buffers have been used and how
         * many do we need to prep the next time prep_queue_for_rx() is called.
         */
        qi->queue_depth = packet_count;

        for (i = 0; i < packet_count; i++) {
                skb = (*skbuff_vector);
                if (mmsg_vector->msg_len > vp->header_size) {
                        if (vp->header_size > 0) {
                                header_check = vp->verify_header(
                                        mmsg_vector->msg_hdr.msg_iov->iov_base,
                                if (header_check < 0) {
                                        /* Overlay header failed to verify - discard.
                                         * We can actually keep this skb and reuse it,
                                         * but that will make the prep logic too
                                         */
                                        dev_kfree_skb_irq(skb);
                                        vp->estats.rx_encaps_errors++;
                                if (header_check > 0) {
                                        vp->estats.rx_csum_offload_good++;
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                mmsg_vector->msg_len - vp->rx_header_size);
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        /*
                         * We do not need to lock on updating stats here.
                         * The interrupt loop is non-reentrant.
                         */
                        vp->dev->stats.rx_bytes += skb->len;
                        vp->dev->stats.rx_packets++;
                        /* Overlay header too short to do anything - discard.
                         * We can actually keep this skb and reuse it,
                         * but that will make the prep logic too complex.
                         */
                        dev_kfree_skb_irq(skb);
                (*skbuff_vector) = NULL;
                /* Move to the next buffer element */

        if (packet_count > 0) {
                if (vp->estats.rx_queue_max < packet_count)
                        vp->estats.rx_queue_max = packet_count;
                vp->estats.rx_queue_running_average =
                        (vp->estats.rx_queue_running_average + packet_count) >> 1;

        return packet_count;
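
/*
 * Bookkeeping example for the statistics above (illustrative numbers): if
 * the previous rx_queue_running_average was 16 and this recvmmsg() call
 * returned 48 packets, the average becomes (16 + 48) >> 1 = 32 and
 * rx_queue_max is raised to 48. These counters are exported through
 * "ethtool -S" via ethtool_stats_keys[].
 */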
static void vector_rx(struct vector_private *vp)
        if ((vp->options & VECTOR_RX) > 0)
                while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
        else
                while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
        if ((err != 0) && net_ratelimit())
                netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
        if (iter == MAX_ITERATIONS)
                netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");

static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct vector_private *vp = netdev_priv(dev);
        int queue_depth = 0;

        deactivate_fd(vp->fds->rx_fd, vp->rx_irq);
        if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0))
                deactivate_fd(vp->fds->tx_fd, vp->tx_irq);
        return NETDEV_TX_BUSY;

        if ((vp->options & VECTOR_TX) == 0) {
                return NETDEV_TX_OK;

        /* We do BQL only in the vector path, no point doing it in
         * packet at a time mode as there is no device queue
         */
        netdev_sent_queue(vp->dev, skb->len);
        queue_depth = vector_enqueue(vp->tx_queue, skb);

        /* if the device queue is full, stop the upper layers and
         */
        if (queue_depth >= vp->tx_queue->max_depth - 1) {
                vp->estats.tx_kicks++;
                netif_stop_queue(dev);
                vector_send(vp->tx_queue);
                return NETDEV_TX_OK;
        if (netdev_xmit_more()) {
                mod_timer(&vp->tl, vp->coalesce);
                return NETDEV_TX_OK;
        if (skb->len < TX_SMALL_PACKET) {
                vp->estats.tx_kicks++;
                vector_send(vp->tx_queue);
        tasklet_schedule(&vp->tx_poll);
        return NETDEV_TX_OK;
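
/*
 * Transmit policy recap (vector mode): every skb is accounted via BQL
 * (netdev_sent_queue) and then enqueued. A nearly full queue stops the
 * upper layers and flushes immediately; if the stack signalled xmit_more
 * the flush is deferred to the coalescing timer; small standalone packets
 * (under TX_SMALL_PACKET bytes) are sent straight away; everything else is
 * kicked out through the tx_poll tasklet.
 */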
static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct vector_private *vp = netdev_priv(dev);

        if (!netif_running(dev))

static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
        struct net_device *dev = dev_id;
        struct vector_private *vp = netdev_priv(dev);

        if (!netif_running(dev))
        /* We need to pay attention to it only if we got
         * -EAGAIN or -ENOBUFS from sendmmsg. Otherwise
         * we ignore it. In the future, it may be worth
         * it to improve the IRQ controller a bit to make
         * tweaking the IRQ mask less costly
         */

        if (vp->in_write_poll)
                tasklet_schedule(&vp->tx_poll);
static int vector_net_close(struct net_device *dev)
        struct vector_private *vp = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);

        if (vp->fds == NULL)

        /* Disable and free all IRQS */
        if (vp->rx_irq > 0) {
                um_free_irq(vp->rx_irq, dev);
        if (vp->tx_irq > 0) {
                um_free_irq(vp->tx_irq, dev);
        tasklet_kill(&vp->tx_poll);
        if (vp->fds->rx_fd > 0) {
                uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
                os_close_file(vp->fds->rx_fd);
                vp->fds->rx_fd = -1;
        if (vp->fds->tx_fd > 0) {
                os_close_file(vp->fds->tx_fd);
                vp->fds->tx_fd = -1;
        if (vp->bpf != NULL)
                kfree(vp->bpf->filter);
        kfree(vp->fds->remote_addr);
        kfree(vp->transport_data);
        kfree(vp->header_rxbuffer);
        kfree(vp->header_txbuffer);
        if (vp->rx_queue != NULL)
                destroy_queue(vp->rx_queue);
        if (vp->tx_queue != NULL)
                destroy_queue(vp->tx_queue);

        spin_lock_irqsave(&vp->lock, flags);
        vp->in_error = false;
        spin_unlock_irqrestore(&vp->lock, flags);

static void vector_tx_poll(struct tasklet_struct *t)
{
        struct vector_private *vp = from_tasklet(vp, t, tx_poll);

        vp->estats.tx_kicks++;
        vector_send(vp->tx_queue);
}

static void vector_reset_tx(struct work_struct *work)
{
        struct vector_private *vp =
                container_of(work, struct vector_private, reset_tx);

        netdev_reset_queue(vp->dev);
        netif_start_queue(vp->dev);
        netif_wake_queue(vp->dev);
}
static int vector_net_open(struct net_device *dev)
        struct vector_private *vp = netdev_priv(dev);
        unsigned long flags;
        struct vector_device *vdevice;

        spin_lock_irqsave(&vp->lock, flags);
                spin_unlock_irqrestore(&vp->lock, flags);
        spin_unlock_irqrestore(&vp->lock, flags);

        vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed));

        vp->fds = uml_vector_user_open(vp->unit, vp->parsed);

        if (vp->fds == NULL)

        if (build_transport_data(vp) < 0)

        if ((vp->options & VECTOR_RX) > 0) {
                vp->rx_queue = create_queue(
                        get_depth(vp->parsed),
                vp->rx_queue->queue_depth = get_depth(vp->parsed);

                vp->header_rxbuffer = kmalloc(
                if (vp->header_rxbuffer == NULL)

        if ((vp->options & VECTOR_TX) > 0) {
                vp->tx_queue = create_queue(
                        get_depth(vp->parsed),

                vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL);
                if (vp->header_txbuffer == NULL)

        err = um_request_irq(
                irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
                IRQ_READ, vector_rx_interrupt,
                IRQF_SHARED, dev->name, dev);
                netdev_err(dev, "vector_open: failed to get rx irq(%d)\n", err);
        vp->rx_irq = irq_rr + VECTOR_BASE_IRQ;
        dev->irq = irq_rr + VECTOR_BASE_IRQ;
        irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;

        /* WRITE IRQ - we need it only if we have vector TX */
        if ((vp->options & VECTOR_TX) > 0) {
                err = um_request_irq(
                        irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd,
                        IRQ_WRITE, vector_tx_interrupt,
                        IRQF_SHARED, dev->name, dev);
                        "vector_open: failed to get tx irq(%d)\n", err);
                vp->tx_irq = irq_rr + VECTOR_BASE_IRQ;
                irq_rr = (irq_rr + 1) % VECTOR_IRQ_SPACE;

        if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
                if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
                        vp->options |= VECTOR_BPF;

        if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL))
                vp->bpf = uml_vector_default_bpf(dev->dev_addr);

        if (vp->bpf != NULL)
                uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

        netif_start_queue(dev);

        /* clear buffer - it can happen that the host side of the interface
         * is full when we get here. In this case, new data is never queued,
         * SIGIOs never arrive, and the net never works.
         */
        vector_reset_stats(vp);
        vdevice = find_device(vp->unit);
        vdevice->opened = 1;

        if ((vp->options & VECTOR_TX) != 0)

        vector_net_close(dev);
static void vector_net_set_multicast_list(struct net_device *dev)
{
        /* TODO: - we can do some BPF games here */
}

static void vector_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        struct vector_private *vp = netdev_priv(dev);

        vp->estats.tx_timeout_count++;
        netif_trans_update(dev);
        schedule_work(&vp->reset_tx);
}

static netdev_features_t vector_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        return features;
}

static int vector_set_features(struct net_device *dev,
        netdev_features_t features)
{
        struct vector_private *vp = netdev_priv(dev);

        /* Adjust buffer sizes for GSO/GRO. Unfortunately, there is
         * no way to negotiate it on raw sockets, so we can change
         */
        if (features & NETIF_F_GRO)
                /* All new frame buffers will be GRO-sized */
                vp->req_size = 65536;
        else
                /* All new frame buffers will be normal sized */
                vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN;
        return 0;
}
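
/*
 * Worked example for the sizing above: with a typical 1500-byte MTU plus
 * ETH_HEADER_OTHER, DEFAULT_HEADROOM (2) and SAFETY_MARGIN (32), non-GRO
 * receive buffers stay a little over the MTU, while enabling GRO switches
 * req_size to 64 KiB so a large host-side aggregated packet can be received
 * into one skb (linear part plus the page fragments allocated in prep_skb()).
 */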
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vector_net_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        vector_rx_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif
static void vector_net_get_drvinfo(struct net_device *dev,
                struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
}

static int vector_net_load_bpf_flash(struct net_device *dev,
                struct ethtool_flash *efl)
        struct vector_private *vp = netdev_priv(dev);
        struct vector_device *vdevice;
        const struct firmware *fw;

        if (!(vp->options & VECTOR_BPF_FLASH)) {
                netdev_err(dev, "loading firmware not permitted: %s\n", efl->data);

        spin_lock(&vp->lock);

        if (vp->bpf != NULL) {
                uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
                kfree(vp->bpf->filter);
                vp->bpf->filter = NULL;

        vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC);
        if (vp->bpf == NULL) {
                netdev_err(dev, "failed to allocate memory for firmware\n");

        vdevice = find_device(vp->unit);

        if (request_firmware(&fw, efl->data, &vdevice->pdev.dev))

        vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC);
        if (!vp->bpf->filter)

        vp->bpf->len = fw->size / sizeof(struct sock_filter);
        release_firmware(fw);

        result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);

        spin_unlock(&vp->lock);

        release_firmware(fw);

        spin_unlock(&vp->lock);
        if (vp->bpf != NULL)
                kfree(vp->bpf->filter);

static void vector_get_ringparam(struct net_device *netdev,
                struct ethtool_ringparam *ring)
{
        struct vector_private *vp = netdev_priv(netdev);

        ring->rx_max_pending = vp->rx_queue->max_depth;
        ring->tx_max_pending = vp->tx_queue->max_depth;
        ring->rx_pending = vp->rx_queue->max_depth;
        ring->tx_pending = vp->tx_queue->max_depth;
}

static void vector_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
        switch (stringset) {
                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));

static int vector_get_sset_count(struct net_device *dev, int sset)
                return VECTOR_NUM_STATS;

static void vector_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *estats,
        struct vector_private *vp = netdev_priv(dev);

        memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats));
static int vector_get_coalesce(struct net_device *netdev,
                struct ethtool_coalesce *ec)
{
        struct vector_private *vp = netdev_priv(netdev);
        ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ;
        return 0;
}

static int vector_set_coalesce(struct net_device *netdev,
                struct ethtool_coalesce *ec)
{
        struct vector_private *vp = netdev_priv(netdev);
        vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000;
        if (vp->coalesce == 0)
                vp->coalesce = 1;
        return 0;
}
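
/*
 * Conversion example: vp->coalesce is kept in jiffies, so with HZ=100 a
 * request of "ethtool -C vecN tx-usecs 10000" becomes one jiffy (10 ms),
 * and anything shorter is rounded up to a single jiffy as well. The timer
 * armed from the xmit path (vector_timer_expire below) flushes the TX
 * queue when it fires.
 */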
static const struct ethtool_ops vector_net_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS,
        .get_drvinfo = vector_net_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_ringparam = vector_get_ringparam,
        .get_strings = vector_get_strings,
        .get_sset_count = vector_get_sset_count,
        .get_ethtool_stats = vector_get_ethtool_stats,
        .get_coalesce = vector_get_coalesce,
        .set_coalesce = vector_set_coalesce,
        .flash_device = vector_net_load_bpf_flash,
};

static const struct net_device_ops vector_netdev_ops = {
        .ndo_open = vector_net_open,
        .ndo_stop = vector_net_close,
        .ndo_start_xmit = vector_net_start_xmit,
        .ndo_set_rx_mode = vector_net_set_multicast_list,
        .ndo_tx_timeout = vector_net_tx_timeout,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_fix_features = vector_fix_features,
        .ndo_set_features = vector_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = vector_net_poll_controller,
#endif
};

static void vector_timer_expire(struct timer_list *t)
{
        struct vector_private *vp = from_timer(vp, t, tl);

        vp->estats.tx_kicks++;
        vector_send(vp->tx_queue);
}
static void vector_eth_configure(int n, struct arglist *def)
{
        struct vector_device *device;
        struct net_device *dev;
        struct vector_private *vp;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (device == NULL) {
                printk(KERN_ERR "eth_configure failed to allocate struct "
        dev = alloc_etherdev(sizeof(struct vector_private));
                printk(KERN_ERR "eth_configure: failed to allocate struct "
                       "net_device for vec%d\n", n);
                goto out_free_device;
        dev->mtu = get_mtu(def);

        INIT_LIST_HEAD(&device->list);

        /* If this name ends up conflicting with an existing registered
         * netdevice, that is OK, register_netdev{,ice}() will notice this
         */
        snprintf(dev->name, sizeof(dev->name), "vec%d", n);
        uml_net_setup_etheraddr(dev, uml_vector_fetch_arg(def, "mac"));
        vp = netdev_priv(dev);

        /* sysfs register */
        if (!driver_registered) {
                platform_driver_register(&uml_net_driver);
                driver_registered = 1;
        device->pdev.id = n;
        device->pdev.name = DRIVER_NAME;
        device->pdev.dev.release = vector_device_release;
        dev_set_drvdata(&device->pdev.dev, device);
        if (platform_device_register(&device->pdev))
                goto out_free_netdev;
        SET_NETDEV_DEV(dev, &device->pdev.dev);

        *vp = ((struct vector_private)
                .list = LIST_HEAD_INIT(vp->list),
                .options = get_transport_options(def),
                .max_packet = get_mtu(def) + ETH_HEADER_OTHER,
                /* TODO - we need to calculate headroom so that ip header
                 * is 16 byte aligned all the time
                 */
                .headroom = get_headroom(def),
                .form_header = NULL,
                .verify_header = NULL,
                .header_rxbuffer = NULL,
                .header_txbuffer = NULL,
                .rx_header_size = 0,
                .rexmit_scheduled = false,
                .transport_data = NULL,
                .in_write_poll = false,
                .req_size = get_req_size(def),

        dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
        tasklet_setup(&vp->tx_poll, vector_tx_poll);
        INIT_WORK(&vp->reset_tx, vector_reset_tx);

        timer_setup(&vp->tl, vector_timer_expire, 0);
        spin_lock_init(&vp->lock);

        dev->netdev_ops = &vector_netdev_ops;
        dev->ethtool_ops = &vector_net_ethtool_ops;
        dev->watchdog_timeo = (HZ >> 1);
        /* primary IRQ - fixme */
        dev->irq = 0; /* we will adjust this once opened */

        err = register_netdevice(dev);
                goto out_undo_user_init;

        spin_lock(&vector_devices_lock);
        list_add(&device->list, &vector_devices);
        spin_unlock(&vector_devices_lock);
/*
 * Invoked late in the init
 */

static int __init vector_init(void)
        struct list_head *ele;
        struct vector_cmd_line_arg *def;
        struct arglist *parsed;

        list_for_each(ele, &vec_cmd_line) {
                def = list_entry(ele, struct vector_cmd_line_arg, list);
                parsed = uml_parse_vector_ifspec(def->arguments);
                        vector_eth_configure(def->unit, parsed);

/* Invoked at initial argument parsing, only stores
 * arguments until a proper vector_init is called
 */

static int __init vector_setup(char *str)
        struct vector_cmd_line_arg *new;

        err = vector_parse(str, &n, &str, &error);
                printk(KERN_ERR "vector_setup - Couldn't parse '%s' : %s\n",
        new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES);
                panic("%s: Failed to allocate %zu bytes\n", __func__,
        INIT_LIST_HEAD(&new->list);
        new->arguments = str;
        list_add_tail(&new->list, &vec_cmd_line);

__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
"        Configure a vector io network device.\n\n"
);

late_initcall(vector_init);
static struct mc_device vector_mc = {
        .list = LIST_HEAD_INIT(vector_mc.list),
        .config = vector_config,
        .remove = vector_remove,

static int vector_inetaddr_event(
        struct notifier_block *this,
        unsigned long event,

static struct notifier_block vector_inetaddr_notifier = {
        .notifier_call = vector_inetaddr_event,
};

static void inet_register(void)
{
        register_inetaddr_notifier(&vector_inetaddr_notifier);
}

static inline void inet_register(void)

static int vector_net_init(void)
        mconsole_register_dev(&vector_mc);

__initcall(vector_net_init);