1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67                  "Maximum number of queues per virtual interface");
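
/*
 * Illustrative usage: the per-interface queue limit can be capped at load
 * time, e.g. "modprobe xen-netfront max_queues=4"; the number of queues
 * actually used is still negotiated with the backend and may be lower.
 */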
68
69 #define XENNET_TIMEOUT  (5 * HZ)
70
71 static const struct ethtool_ops xennet_ethtool_ops;
72
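/*
 * Per-skb control block: pull_to records how many bytes should be pulled
 * into the linear area (capped at RX_COPY_THRESHOLD) before the skb is
 * handed to the stack.
 */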
73 struct netfront_cb {
74         int pull_to;
75 };
76
77 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
78
79 #define RX_COPY_THRESHOLD 256
80
81 #define GRANT_INVALID_REF       0
82
83 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
84 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
85
86 /* Minimum number of Rx slots (includes slot for GSO metadata). */
87 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
88
89 /* Queue name is interface name with "-qNNN" appended */
90 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
91
92 /* IRQ name is queue name with "-tx" or "-rx" appended */
93 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
94
95 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
96
97 struct netfront_stats {
98         u64                     packets;
99         u64                     bytes;
100         struct u64_stats_sync   syncp;
101 };
102
103 struct netfront_info;
104
105 struct netfront_queue {
106         unsigned int id; /* Queue ID, 0-based */
107         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
108         struct netfront_info *info;
109
110         struct bpf_prog __rcu *xdp_prog;
111
112         struct napi_struct napi;
113
114         /* Split event channels support, tx_* == rx_* when using
115          * single event channel.
116          */
117         unsigned int tx_evtchn, rx_evtchn;
118         unsigned int tx_irq, rx_irq;
119         /* Only used when split event channels support is enabled */
120         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
121         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
122
123         spinlock_t   tx_lock;
124         struct xen_netif_tx_front_ring tx;
125         int tx_ring_ref;
126
127         /*
128          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
129          * are linked from tx_skb_freelist through tx_link.
130          */
131         struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
132         unsigned short tx_link[NET_TX_RING_SIZE];
133 #define TX_LINK_NONE 0xffff
134 #define TX_PENDING   0xfffe
135         grant_ref_t gref_tx_head;
136         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
137         struct page *grant_tx_page[NET_TX_RING_SIZE];
138         unsigned tx_skb_freelist;
139         unsigned int tx_pend_queue;
140
141         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
142         struct xen_netif_rx_front_ring rx;
143         int rx_ring_ref;
144
145         struct timer_list rx_refill_timer;
146
147         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
148         grant_ref_t gref_rx_head;
149         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
150
151         struct page_pool *page_pool;
152         struct xdp_rxq_info xdp_rxq;
153 };
154
155 struct netfront_info {
156         struct list_head list;
157         struct net_device *netdev;
158
159         struct xenbus_device *xbdev;
160
161         /* Multi-queue support */
162         struct netfront_queue *queues;
163
164         /* Statistics */
165         struct netfront_stats __percpu *rx_stats;
166         struct netfront_stats __percpu *tx_stats;
167
168         /* XDP state */
169         bool netback_has_xdp_headroom;
170         bool netfront_xdp_enabled;
171
172         /* Is the device behaving sanely? */
173         bool broken;
174
175         atomic_t rx_gso_checksum_fixup;
176 };
177
178 struct netfront_rx_info {
179         struct xen_netif_rx_response rx;
180         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
181 };
182
183 /*
184  * Helpers for acquiring/freeing slots in tx_skbs[].
185  */
186
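/*
 * tx_link[] doubles as a singly linked free list: a free entry stores the
 * index of the next free entry, TX_LINK_NONE terminates the list, and
 * TX_PENDING marks a slot whose request has been queued on the ring but
 * not yet completed by the backend.
 */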
187 static void add_id_to_list(unsigned *head, unsigned short *list,
188                            unsigned short id)
189 {
190         list[id] = *head;
191         *head = id;
192 }
193
194 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
195 {
196         unsigned int id = *head;
197
198         if (id != TX_LINK_NONE) {
199                 *head = list[id];
200                 list[id] = TX_LINK_NONE;
201         }
202         return id;
203 }
204
205 static int xennet_rxidx(RING_IDX idx)
206 {
207         return idx & (NET_RX_RING_SIZE - 1);
208 }
209
210 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
211                                          RING_IDX ri)
212 {
213         int i = xennet_rxidx(ri);
214         struct sk_buff *skb = queue->rx_skbs[i];
215         queue->rx_skbs[i] = NULL;
216         return skb;
217 }
218
219 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
220                                             RING_IDX ri)
221 {
222         int i = xennet_rxidx(ri);
223         grant_ref_t ref = queue->grant_rx_ref[i];
224         queue->grant_rx_ref[i] = GRANT_INVALID_REF;
225         return ref;
226 }
227
228 #ifdef CONFIG_SYSFS
229 static const struct attribute_group xennet_dev_group;
230 #endif
231
232 static bool xennet_can_sg(struct net_device *dev)
233 {
234         return dev->features & NETIF_F_SG;
235 }
236
237
238 static void rx_refill_timeout(struct timer_list *t)
239 {
240         struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
241         napi_schedule(&queue->napi);
242 }
243
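/*
 * Report the tx queue as having room only while more than
 * XEN_NETIF_NR_SLOTS_MIN + 1 slots remain free, so that a maximally
 * fragmented skb can still be queued.
 */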
244 static int netfront_tx_slot_available(struct netfront_queue *queue)
245 {
246         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
247                 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
248 }
249
250 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
251 {
252         struct net_device *dev = queue->info->netdev;
253         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
254
255         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
256             netfront_tx_slot_available(queue) &&
257             likely(netif_running(dev)))
258                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
259 }
260
261
262 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
263 {
264         struct sk_buff *skb;
265         struct page *page;
266
267         skb = __netdev_alloc_skb(queue->info->netdev,
268                                  RX_COPY_THRESHOLD + NET_IP_ALIGN,
269                                  GFP_ATOMIC | __GFP_NOWARN);
270         if (unlikely(!skb))
271                 return NULL;
272
273         page = page_pool_dev_alloc_pages(queue->page_pool);
274         if (unlikely(!page)) {
275                 kfree_skb(skb);
276                 return NULL;
277         }
278         skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
279
280         /* Align the IP header to a 16-byte boundary */
281         skb_reserve(skb, NET_IP_ALIGN);
282         skb->dev = queue->info->netdev;
283
284         return skb;
285 }
286
287
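/*
 * Refill the rx ring: allocate skb+page pairs, grant the backend access to
 * each page and post one request per slot.  If an allocation fails or fewer
 * than NET_RX_SLOTS_MIN slots are outstanding, retry via rx_refill_timer
 * (HZ/10, i.e. 100 ms) instead of notifying the backend.
 */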
288 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
289 {
290         RING_IDX req_prod = queue->rx.req_prod_pvt;
291         int notify;
292         int err = 0;
293
294         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
295                 return;
296
297         for (req_prod = queue->rx.req_prod_pvt;
298              req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
299              req_prod++) {
300                 struct sk_buff *skb;
301                 unsigned short id;
302                 grant_ref_t ref;
303                 struct page *page;
304                 struct xen_netif_rx_request *req;
305
306                 skb = xennet_alloc_one_rx_buffer(queue);
307                 if (!skb) {
308                         err = -ENOMEM;
309                         break;
310                 }
311
312                 id = xennet_rxidx(req_prod);
313
314                 BUG_ON(queue->rx_skbs[id]);
315                 queue->rx_skbs[id] = skb;
316
317                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
318                 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
319                 queue->grant_rx_ref[id] = ref;
320
321                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
322
323                 req = RING_GET_REQUEST(&queue->rx, req_prod);
324                 gnttab_page_grant_foreign_access_ref_one(ref,
325                                                          queue->info->xbdev->otherend_id,
326                                                          page,
327                                                          0);
328                 req->id = id;
329                 req->gref = ref;
330         }
331
332         queue->rx.req_prod_pvt = req_prod;
333
334         /* Try again later if there are not enough requests or skb allocation
335          * failed.
336          * "Enough requests" means the sum of newly created slots and the
337          * slots still unconsumed by the backend.
338          */
339         if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
340             unlikely(err)) {
341                 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
342                 return;
343         }
344
345         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
346         if (notify)
347                 notify_remote_via_irq(queue->rx_irq);
348 }
349
350 static int xennet_open(struct net_device *dev)
351 {
352         struct netfront_info *np = netdev_priv(dev);
353         unsigned int num_queues = dev->real_num_tx_queues;
354         unsigned int i = 0;
355         struct netfront_queue *queue = NULL;
356
357         if (!np->queues || np->broken)
358                 return -ENODEV;
359
360         for (i = 0; i < num_queues; ++i) {
361                 queue = &np->queues[i];
362                 napi_enable(&queue->napi);
363
364                 spin_lock_bh(&queue->rx_lock);
365                 if (netif_carrier_ok(dev)) {
366                         xennet_alloc_rx_buffers(queue);
367                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
368                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
369                                 napi_schedule(&queue->napi);
370                 }
371                 spin_unlock_bh(&queue->rx_lock);
372         }
373
374         netif_tx_start_all_queues(dev);
375
376         return 0;
377 }
378
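/*
 * Reclaim tx slots whose requests the backend has completed: validate each
 * response, end the grant, return the slot to the free list and free the
 * skb.  Any inconsistent response marks the device as broken.
 */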
379 static void xennet_tx_buf_gc(struct netfront_queue *queue)
380 {
381         RING_IDX cons, prod;
382         unsigned short id;
383         struct sk_buff *skb;
384         bool more_to_do;
385         const struct device *dev = &queue->info->netdev->dev;
386
387         BUG_ON(!netif_carrier_ok(queue->info->netdev));
388
389         do {
390                 prod = queue->tx.sring->rsp_prod;
391                 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
392                         dev_alert(dev, "Illegal number of responses %u\n",
393                                   prod - queue->tx.rsp_cons);
394                         goto err;
395                 }
396                 rmb(); /* Ensure we see responses up to 'rp'. */
397
398                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
399                         struct xen_netif_tx_response txrsp;
400
401                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
402                         if (txrsp.status == XEN_NETIF_RSP_NULL)
403                                 continue;
404
405                         id = txrsp.id;
406                         if (id >= RING_SIZE(&queue->tx)) {
407                                 dev_alert(dev,
408                                           "Response has incorrect id (%u)\n",
409                                           id);
410                                 goto err;
411                         }
412                         if (queue->tx_link[id] != TX_PENDING) {
413                                 dev_alert(dev,
414                                           "Response for inactive request\n");
415                                 goto err;
416                         }
417
418                         queue->tx_link[id] = TX_LINK_NONE;
419                         skb = queue->tx_skbs[id];
420                         queue->tx_skbs[id] = NULL;
421                         if (unlikely(gnttab_query_foreign_access(
422                                 queue->grant_tx_ref[id]) != 0)) {
423                                 dev_alert(dev,
424                                           "Grant still in use by backend domain\n");
425                                 goto err;
426                         }
427                         gnttab_end_foreign_access_ref(
428                                 queue->grant_tx_ref[id], GNTMAP_readonly);
429                         gnttab_release_grant_reference(
430                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
431                         queue->grant_tx_ref[id] = GRANT_INVALID_REF;
432                         queue->grant_tx_page[id] = NULL;
433                         add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
434                         dev_kfree_skb_irq(skb);
435                 }
436
437                 queue->tx.rsp_cons = prod;
438
439                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
440         } while (more_to_do);
441
442         xennet_maybe_wake_tx(queue);
443
444         return;
445
446  err:
447         queue->info->broken = true;
448         dev_alert(dev, "Disabled for further use\n");
449 }
450
451 struct xennet_gnttab_make_txreq {
452         struct netfront_queue *queue;
453         struct sk_buff *skb;
454         struct page *page;
455         struct xen_netif_tx_request *tx;      /* Last request on ring page */
456         struct xen_netif_tx_request tx_local; /* Local copy of the last request */
457         unsigned int size;
458 };
459
460 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
461                                   unsigned int len, void *data)
462 {
463         struct xennet_gnttab_make_txreq *info = data;
464         unsigned int id;
465         struct xen_netif_tx_request *tx;
466         grant_ref_t ref;
467         /* convenient aliases */
468         struct page *page = info->page;
469         struct netfront_queue *queue = info->queue;
470         struct sk_buff *skb = info->skb;
471
472         id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
473         tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
474         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
475         WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
476
477         gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
478                                         gfn, GNTMAP_readonly);
479
480         queue->tx_skbs[id] = skb;
481         queue->grant_tx_page[id] = page;
482         queue->grant_tx_ref[id] = ref;
483
484         info->tx_local.id = id;
485         info->tx_local.gref = ref;
486         info->tx_local.offset = offset;
487         info->tx_local.size = len;
488         info->tx_local.flags = 0;
489
490         *tx = info->tx_local;
491
492         /*
493          * Put the request on the pending queue; it will be marked as pending
494          * when the producer index is about to be raised.
495          */
496         add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
497
498         info->tx = tx;
499         info->size += info->tx_local.size;
500 }
501
502 static struct xen_netif_tx_request *xennet_make_first_txreq(
503         struct xennet_gnttab_make_txreq *info,
504         unsigned int offset, unsigned int len)
505 {
506         info->size = 0;
507
508         gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
509
510         return info->tx;
511 }
512
513 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
514                                   unsigned int len, void *data)
515 {
516         struct xennet_gnttab_make_txreq *info = data;
517
518         info->tx->flags |= XEN_NETTXF_more_data;
519         skb_get(info->skb);
520         xennet_tx_setup_grant(gfn, offset, len, data);
521 }
522
523 static void xennet_make_txreqs(
524         struct xennet_gnttab_make_txreq *info,
525         struct page *page,
526         unsigned int offset, unsigned int len)
527 {
528         /* Skip unused frames from start of page */
529         page += offset >> PAGE_SHIFT;
530         offset &= ~PAGE_MASK;
531
532         while (len) {
533                 info->page = page;
534                 info->size = 0;
535
536                 gnttab_foreach_grant_in_range(page, offset, len,
537                                               xennet_make_one_txreq,
538                                               info);
539
540                 page++;
541                 offset = 0;
542                 len -= info->size;
543         }
544 }
545
546 /*
547  * Count how many ring slots are required to send this skb. Each frag
548  * might be a compound page.
549  */
550 static int xennet_count_skb_slots(struct sk_buff *skb)
551 {
552         int i, frags = skb_shinfo(skb)->nr_frags;
553         int slots;
554
555         slots = gnttab_count_grant(offset_in_page(skb->data),
556                                    skb_headlen(skb));
557
558         for (i = 0; i < frags; i++) {
559                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
560                 unsigned long size = skb_frag_size(frag);
561                 unsigned long offset = skb_frag_off(frag);
562
563                 /* Skip unused frames from start of page */
564                 offset &= ~PAGE_MASK;
565
566                 slots += gnttab_count_grant(offset, size);
567         }
568
569         return slots;
570 }
571
572 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
573                                struct net_device *sb_dev)
574 {
575         unsigned int num_queues = dev->real_num_tx_queues;
576         u32 hash;
577         u16 queue_idx;
578
579         /* First, check if there is only one queue */
580         if (num_queues == 1) {
581                 queue_idx = 0;
582         } else {
583                 hash = skb_get_hash(skb);
584                 queue_idx = hash % num_queues;
585         }
586
587         return queue_idx;
588 }
589
590 static void xennet_mark_tx_pending(struct netfront_queue *queue)
591 {
592         unsigned int i;
593
594         while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
595                TX_LINK_NONE)
596                 queue->tx_link[i] = TX_PENDING;
597 }
598
599 static int xennet_xdp_xmit_one(struct net_device *dev,
600                                struct netfront_queue *queue,
601                                struct xdp_frame *xdpf)
602 {
603         struct netfront_info *np = netdev_priv(dev);
604         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
605         struct xennet_gnttab_make_txreq info = {
606                 .queue = queue,
607                 .skb = NULL,
608                 .page = virt_to_page(xdpf->data),
609         };
610         int notify;
611
612         xennet_make_first_txreq(&info,
613                                 offset_in_page(xdpf->data),
614                                 xdpf->len);
615
616         xennet_mark_tx_pending(queue);
617
618         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
619         if (notify)
620                 notify_remote_via_irq(queue->tx_irq);
621
622         u64_stats_update_begin(&tx_stats->syncp);
623         tx_stats->bytes += xdpf->len;
624         tx_stats->packets++;
625         u64_stats_update_end(&tx_stats->syncp);
626
627         xennet_tx_buf_gc(queue);
628
629         return 0;
630 }
631
632 static int xennet_xdp_xmit(struct net_device *dev, int n,
633                            struct xdp_frame **frames, u32 flags)
634 {
635         unsigned int num_queues = dev->real_num_tx_queues;
636         struct netfront_info *np = netdev_priv(dev);
637         struct netfront_queue *queue = NULL;
638         unsigned long irq_flags;
639         int nxmit = 0;
640         int i;
641
642         if (unlikely(np->broken))
643                 return -ENODEV;
644         if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
645                 return -EINVAL;
646
647         queue = &np->queues[smp_processor_id() % num_queues];
648
649         spin_lock_irqsave(&queue->tx_lock, irq_flags);
650         for (i = 0; i < n; i++) {
651                 struct xdp_frame *xdpf = frames[i];
652
653                 if (!xdpf)
654                         continue;
655                 if (xennet_xdp_xmit_one(dev, queue, xdpf))
656                         break;
657                 nxmit++;
658         }
659         spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
660
661         return nxmit;
662 }
663
664
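/* A 64 KiB packet spans at most 65536 / XEN_PAGE_SIZE pages, plus one
 * extra slot for a possibly misaligned start.
 */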
665 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
666
667 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
668 {
669         struct netfront_info *np = netdev_priv(dev);
670         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
671         struct xen_netif_tx_request *first_tx;
672         unsigned int i;
673         int notify;
674         int slots;
675         struct page *page;
676         unsigned int offset;
677         unsigned int len;
678         unsigned long flags;
679         struct netfront_queue *queue = NULL;
680         struct xennet_gnttab_make_txreq info = { };
681         unsigned int num_queues = dev->real_num_tx_queues;
682         u16 queue_index;
683         struct sk_buff *nskb;
684
685         /* Drop the packet if no queues are set up */
686         if (num_queues < 1)
687                 goto drop;
688         if (unlikely(np->broken))
689                 goto drop;
690         /* Determine which queue to transmit this SKB on */
691         queue_index = skb_get_queue_mapping(skb);
692         queue = &np->queues[queue_index];
693
694         /* If skb->len is too big for wire format, drop skb and alert
695          * user about misconfiguration.
696          */
697         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
698                 net_alert_ratelimited(
699                         "xennet: skb->len = %u, too big for wire format\n",
700                         skb->len);
701                 goto drop;
702         }
703
704         slots = xennet_count_skb_slots(skb);
705         if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
706                 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
707                                     slots, skb->len);
708                 if (skb_linearize(skb))
709                         goto drop;
710         }
711
712         page = virt_to_page(skb->data);
713         offset = offset_in_page(skb->data);
714
715         /* The first request must cover at least ETH_HLEN bytes or the packet
716          * will be dropped by netback.
717          */
718         if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
719                 nskb = skb_copy(skb, GFP_ATOMIC);
720                 if (!nskb)
721                         goto drop;
722                 dev_consume_skb_any(skb);
723                 skb = nskb;
724                 page = virt_to_page(skb->data);
725                 offset = offset_in_page(skb->data);
726         }
727
728         len = skb_headlen(skb);
729
730         spin_lock_irqsave(&queue->tx_lock, flags);
731
732         if (unlikely(!netif_carrier_ok(dev) ||
733                      (slots > 1 && !xennet_can_sg(dev)) ||
734                      netif_needs_gso(skb, netif_skb_features(skb)))) {
735                 spin_unlock_irqrestore(&queue->tx_lock, flags);
736                 goto drop;
737         }
738
739         /* First request for the linear area. */
740         info.queue = queue;
741         info.skb = skb;
742         info.page = page;
743         first_tx = xennet_make_first_txreq(&info, offset, len);
744         offset += info.tx_local.size;
745         if (offset == PAGE_SIZE) {
746                 page++;
747                 offset = 0;
748         }
749         len -= info.tx_local.size;
750
751         if (skb->ip_summed == CHECKSUM_PARTIAL)
752                 /* local packet? */
753                 first_tx->flags |= XEN_NETTXF_csum_blank |
754                                    XEN_NETTXF_data_validated;
755         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
756                 /* remote but checksummed. */
757                 first_tx->flags |= XEN_NETTXF_data_validated;
758
759         /* Optional extra info after the first request. */
760         if (skb_shinfo(skb)->gso_size) {
761                 struct xen_netif_extra_info *gso;
762
763                 gso = (struct xen_netif_extra_info *)
764                         RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
765
766                 first_tx->flags |= XEN_NETTXF_extra_info;
767
768                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
769                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
770                         XEN_NETIF_GSO_TYPE_TCPV6 :
771                         XEN_NETIF_GSO_TYPE_TCPV4;
772                 gso->u.gso.pad = 0;
773                 gso->u.gso.features = 0;
774
775                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
776                 gso->flags = 0;
777         }
778
779         /* Requests for the rest of the linear area. */
780         xennet_make_txreqs(&info, page, offset, len);
781
782         /* Requests for all the frags. */
783         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
784                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
785                 xennet_make_txreqs(&info, skb_frag_page(frag),
786                                         skb_frag_off(frag),
787                                         skb_frag_size(frag));
788         }
789
790         /* First request has the packet length. */
791         first_tx->size = skb->len;
792
793         /* timestamp packet in software */
794         skb_tx_timestamp(skb);
795
796         xennet_mark_tx_pending(queue);
797
798         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
799         if (notify)
800                 notify_remote_via_irq(queue->tx_irq);
801
802         u64_stats_update_begin(&tx_stats->syncp);
803         tx_stats->bytes += skb->len;
804         tx_stats->packets++;
805         u64_stats_update_end(&tx_stats->syncp);
806
807         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
808         xennet_tx_buf_gc(queue);
809
810         if (!netfront_tx_slot_available(queue))
811                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
812
813         spin_unlock_irqrestore(&queue->tx_lock, flags);
814
815         return NETDEV_TX_OK;
816
817  drop:
818         dev->stats.tx_dropped++;
819         dev_kfree_skb_any(skb);
820         return NETDEV_TX_OK;
821 }
822
823 static int xennet_close(struct net_device *dev)
824 {
825         struct netfront_info *np = netdev_priv(dev);
826         unsigned int num_queues = dev->real_num_tx_queues;
827         unsigned int i;
828         struct netfront_queue *queue;
829         netif_tx_stop_all_queues(np->netdev);
830         for (i = 0; i < num_queues; ++i) {
831                 queue = &np->queues[i];
832                 napi_disable(&queue->napi);
833         }
834         return 0;
835 }
836
837 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
838                                 grant_ref_t ref)
839 {
840         int new = xennet_rxidx(queue->rx.req_prod_pvt);
841
842         BUG_ON(queue->rx_skbs[new]);
843         queue->rx_skbs[new] = skb;
844         queue->grant_rx_ref[new] = ref;
845         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
846         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
847         queue->rx.req_prod_pvt++;
848 }
849
850 static int xennet_get_extras(struct netfront_queue *queue,
851                              struct xen_netif_extra_info *extras,
852                              RING_IDX rp)
854 {
855         struct xen_netif_extra_info extra;
856         struct device *dev = &queue->info->netdev->dev;
857         RING_IDX cons = queue->rx.rsp_cons;
858         int err = 0;
859
860         do {
861                 struct sk_buff *skb;
862                 grant_ref_t ref;
863
864                 if (unlikely(cons + 1 == rp)) {
865                         if (net_ratelimit())
866                                 dev_warn(dev, "Missing extra info\n");
867                         err = -EBADR;
868                         break;
869                 }
870
871                 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
872
873                 if (unlikely(!extra.type ||
874                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
875                         if (net_ratelimit())
876                                 dev_warn(dev, "Invalid extra type: %d\n",
877                                          extra.type);
878                         err = -EINVAL;
879                 } else {
880                         extras[extra.type - 1] = extra;
881                 }
882
883                 skb = xennet_get_rx_skb(queue, cons);
884                 ref = xennet_get_rx_ref(queue, cons);
885                 xennet_move_rx_slot(queue, skb, ref);
886         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
887
888         queue->rx.rsp_cons = cons;
889         return err;
890 }
891
892 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
893                    struct xen_netif_rx_response *rx, struct bpf_prog *prog,
894                    struct xdp_buff *xdp, bool *need_xdp_flush)
895 {
896         struct xdp_frame *xdpf;
897         u32 len = rx->status;
898         u32 act;
899         int err;
900
901         xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
902                       &queue->xdp_rxq);
903         xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
904                          len, false);
905
906         act = bpf_prog_run_xdp(prog, xdp);
907         switch (act) {
908         case XDP_TX:
909                 get_page(pdata);
910                 xdpf = xdp_convert_buff_to_frame(xdp);
911                 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
912                 if (unlikely(!err))
913                         xdp_return_frame_rx_napi(xdpf);
914                 else if (unlikely(err < 0))
915                         trace_xdp_exception(queue->info->netdev, prog, act);
916                 break;
917         case XDP_REDIRECT:
918                 get_page(pdata);
919                 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
920                 *need_xdp_flush = true;
921                 if (unlikely(err))
922                         trace_xdp_exception(queue->info->netdev, prog, act);
923                 break;
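        /*
         * Nothing to do for PASS or DROP here: the caller,
         * xennet_get_responses(), drops any frame whose verdict is not
         * XDP_PASS.
         */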
924         case XDP_PASS:
925         case XDP_DROP:
926                 break;
927
928         case XDP_ABORTED:
929                 trace_xdp_exception(queue->info->netdev, prog, act);
930                 break;
931
932         default:
933                 bpf_warn_invalid_xdp_action(act);
934         }
935
936         return act;
937 }
938
939 static int xennet_get_responses(struct netfront_queue *queue,
940                                 struct netfront_rx_info *rinfo, RING_IDX rp,
941                                 struct sk_buff_head *list,
942                                 bool *need_xdp_flush)
943 {
944         struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
945         int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
946         RING_IDX cons = queue->rx.rsp_cons;
947         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
948         struct xen_netif_extra_info *extras = rinfo->extras;
949         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
950         struct device *dev = &queue->info->netdev->dev;
951         struct bpf_prog *xdp_prog;
952         struct xdp_buff xdp;
953         unsigned long ret;
954         int slots = 1;
955         int err = 0;
956         u32 verdict;
957
958         if (rx->flags & XEN_NETRXF_extra_info) {
959                 err = xennet_get_extras(queue, extras, rp);
960                 if (!err) {
961                         if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
962                                 struct xen_netif_extra_info *xdp;
963
964                                 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
965                                 rx->offset = xdp->u.xdp.headroom;
966                         }
967                 }
968                 cons = queue->rx.rsp_cons;
969         }
970
971         for (;;) {
972                 if (unlikely(rx->status < 0 ||
973                              rx->offset + rx->status > XEN_PAGE_SIZE)) {
974                         if (net_ratelimit())
975                                 dev_warn(dev, "rx->offset: %u, size: %d\n",
976                                          rx->offset, rx->status);
977                         xennet_move_rx_slot(queue, skb, ref);
978                         err = -EINVAL;
979                         goto next;
980                 }
981
982                 /*
983                  * This definitely indicates a bug, either in this driver or in
984                  * the backend driver. In future this should flag the bad
985                  * situation to the system controller to reboot the backend.
986                  */
987                 if (ref == GRANT_INVALID_REF) {
988                         if (net_ratelimit())
989                                 dev_warn(dev, "Bad rx response id %d.\n",
990                                          rx->id);
991                         err = -EINVAL;
992                         goto next;
993                 }
994
995                 ret = gnttab_end_foreign_access_ref(ref, 0);
996                 BUG_ON(!ret);
997
998                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
999
1000                 rcu_read_lock();
1001                 xdp_prog = rcu_dereference(queue->xdp_prog);
1002                 if (xdp_prog) {
1003                         if (!(rx->flags & XEN_NETRXF_more_data)) {
1004                                 /* currently only a single page contains data */
1005                                 verdict = xennet_run_xdp(queue,
1006                                                          skb_frag_page(&skb_shinfo(skb)->frags[0]),
1007                                                          rx, xdp_prog, &xdp, need_xdp_flush);
1008                                 if (verdict != XDP_PASS)
1009                                         err = -EINVAL;
1010                         } else {
1011                                 /* drop the frame */
1012                                 err = -EINVAL;
1013                         }
1014                 }
1015                 rcu_read_unlock();
1016 next:
1017                 __skb_queue_tail(list, skb);
1018                 if (!(rx->flags & XEN_NETRXF_more_data))
1019                         break;
1020
1021                 if (cons + slots == rp) {
1022                         if (net_ratelimit())
1023                                 dev_warn(dev, "Need more slots\n");
1024                         err = -ENOENT;
1025                         break;
1026                 }
1027
1028                 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1029                 rx = &rx_local;
1030                 skb = xennet_get_rx_skb(queue, cons + slots);
1031                 ref = xennet_get_rx_ref(queue, cons + slots);
1032                 slots++;
1033         }
1034
1035         if (unlikely(slots > max)) {
1036                 if (net_ratelimit())
1037                         dev_warn(dev, "Too many slots\n");
1038                 err = -E2BIG;
1039         }
1040
1041         if (unlikely(err))
1042                 queue->rx.rsp_cons = cons + slots;
1043
1044         return err;
1045 }
1046
1047 static int xennet_set_skb_gso(struct sk_buff *skb,
1048                               struct xen_netif_extra_info *gso)
1049 {
1050         if (!gso->u.gso.size) {
1051                 if (net_ratelimit())
1052                         pr_warn("GSO size must not be zero\n");
1053                 return -EINVAL;
1054         }
1055
1056         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1057             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1058                 if (net_ratelimit())
1059                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1060                 return -EINVAL;
1061         }
1062
1063         skb_shinfo(skb)->gso_size = gso->u.gso.size;
1064         skb_shinfo(skb)->gso_type =
1065                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1066                 SKB_GSO_TCPV4 :
1067                 SKB_GSO_TCPV6;
1068
1069         /* Header must be checked, and gso_segs computed. */
1070         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1071         skb_shinfo(skb)->gso_segs = 0;
1072
1073         return 0;
1074 }
1075
1076 static int xennet_fill_frags(struct netfront_queue *queue,
1077                              struct sk_buff *skb,
1078                              struct sk_buff_head *list)
1079 {
1080         RING_IDX cons = queue->rx.rsp_cons;
1081         struct sk_buff *nskb;
1082
1083         while ((nskb = __skb_dequeue(list))) {
1084                 struct xen_netif_rx_response rx;
1085                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1086
1087                 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1088
1089                 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1090                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1091
1092                         BUG_ON(pull_to < skb_headlen(skb));
1093                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1094                 }
1095                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1096                         queue->rx.rsp_cons = ++cons + skb_queue_len(list);
1097                         kfree_skb(nskb);
1098                         return -ENOENT;
1099                 }
1100
1101                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1102                                 skb_frag_page(nfrag),
1103                                 rx.offset, rx.status, PAGE_SIZE);
1104
1105                 skb_shinfo(nskb)->nr_frags = 0;
1106                 kfree_skb(nskb);
1107         }
1108
1109         queue->rx.rsp_cons = cons;
1110
1111         return 0;
1112 }
1113
1114 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1115 {
1116         bool recalculate_partial_csum = false;
1117
1118         /*
1119          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1120          * peers can fail to set NETRXF_csum_blank when sending a GSO
1121          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1122          * recalculate the partial checksum.
1123          */
1124         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1125                 struct netfront_info *np = netdev_priv(dev);
1126                 atomic_inc(&np->rx_gso_checksum_fixup);
1127                 skb->ip_summed = CHECKSUM_PARTIAL;
1128                 recalculate_partial_csum = true;
1129         }
1130
1131         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1132         if (skb->ip_summed != CHECKSUM_PARTIAL)
1133                 return 0;
1134
1135         return skb_checksum_setup(skb, recalculate_partial_csum);
1136 }
1137
1138 static int handle_incoming_queue(struct netfront_queue *queue,
1139                                  struct sk_buff_head *rxq)
1140 {
1141         struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1142         int packets_dropped = 0;
1143         struct sk_buff *skb;
1144
1145         while ((skb = __skb_dequeue(rxq)) != NULL) {
1146                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1147
1148                 if (pull_to > skb_headlen(skb))
1149                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1150
1151                 /* Ethernet work: Delayed to here as it peeks the header. */
1152                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1153                 skb_reset_network_header(skb);
1154
1155                 if (checksum_setup(queue->info->netdev, skb)) {
1156                         kfree_skb(skb);
1157                         packets_dropped++;
1158                         queue->info->netdev->stats.rx_errors++;
1159                         continue;
1160                 }
1161
1162                 u64_stats_update_begin(&rx_stats->syncp);
1163                 rx_stats->packets++;
1164                 rx_stats->bytes += skb->len;
1165                 u64_stats_update_end(&rx_stats->syncp);
1166
1167                 /* Pass it up. */
1168                 napi_gro_receive(&queue->napi, skb);
1169         }
1170
1171         return packets_dropped;
1172 }
1173
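/*
 * NAPI poll: consume up to @budget responses from the rx ring, reassemble
 * them into skbs (handling GSO and other extra-info slots), run any
 * attached XDP program, deliver via handle_incoming_queue() and refill the
 * ring afterwards.
 */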
1174 static int xennet_poll(struct napi_struct *napi, int budget)
1175 {
1176         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1177         struct net_device *dev = queue->info->netdev;
1178         struct sk_buff *skb;
1179         struct netfront_rx_info rinfo;
1180         struct xen_netif_rx_response *rx = &rinfo.rx;
1181         struct xen_netif_extra_info *extras = rinfo.extras;
1182         RING_IDX i, rp;
1183         int work_done;
1184         struct sk_buff_head rxq;
1185         struct sk_buff_head errq;
1186         struct sk_buff_head tmpq;
1187         int err;
1188         bool need_xdp_flush = false;
1189
1190         spin_lock(&queue->rx_lock);
1191
1192         skb_queue_head_init(&rxq);
1193         skb_queue_head_init(&errq);
1194         skb_queue_head_init(&tmpq);
1195
1196         rp = queue->rx.sring->rsp_prod;
1197         if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1198                 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1199                           rp - queue->rx.rsp_cons);
1200                 queue->info->broken = true;
1201                 spin_unlock(&queue->rx_lock);
1202                 return 0;
1203         }
1204         rmb(); /* Ensure we see queued responses up to 'rp'. */
1205
1206         i = queue->rx.rsp_cons;
1207         work_done = 0;
1208         while ((i != rp) && (work_done < budget)) {
1209                 RING_COPY_RESPONSE(&queue->rx, i, rx);
1210                 memset(extras, 0, sizeof(rinfo.extras));
1211
1212                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1213                                            &need_xdp_flush);
1214
1215                 if (unlikely(err)) {
1216 err:
1217                         while ((skb = __skb_dequeue(&tmpq)))
1218                                 __skb_queue_tail(&errq, skb);
1219                         dev->stats.rx_errors++;
1220                         i = queue->rx.rsp_cons;
1221                         continue;
1222                 }
1223
1224                 skb = __skb_dequeue(&tmpq);
1225
1226                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1227                         struct xen_netif_extra_info *gso;
1228                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1229
1230                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1231                                 __skb_queue_head(&tmpq, skb);
1232                                 queue->rx.rsp_cons += skb_queue_len(&tmpq);
1233                                 goto err;
1234                         }
1235                 }
1236
1237                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1238                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1239                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1240
1241                 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1242                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1243                 skb->data_len = rx->status;
1244                 skb->len += rx->status;
1245
1246                 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1247                         goto err;
1248
1249                 if (rx->flags & XEN_NETRXF_csum_blank)
1250                         skb->ip_summed = CHECKSUM_PARTIAL;
1251                 else if (rx->flags & XEN_NETRXF_data_validated)
1252                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1253
1254                 __skb_queue_tail(&rxq, skb);
1255
1256                 i = ++queue->rx.rsp_cons;
1257                 work_done++;
1258         }
1259         if (need_xdp_flush)
1260                 xdp_do_flush();
1261
1262         __skb_queue_purge(&errq);
1263
1264         work_done -= handle_incoming_queue(queue, &rxq);
1265
1266         xennet_alloc_rx_buffers(queue);
1267
1268         if (work_done < budget) {
1269                 int more_to_do = 0;
1270
1271                 napi_complete_done(napi, work_done);
1272
1273                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1274                 if (more_to_do)
1275                         napi_schedule(napi);
1276         }
1277
1278         spin_unlock(&queue->rx_lock);
1279
1280         return work_done;
1281 }
1282
1283 static int xennet_change_mtu(struct net_device *dev, int mtu)
1284 {
1285         int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1286
1287         if (mtu > max)
1288                 return -EINVAL;
1289         dev->mtu = mtu;
1290         return 0;
1291 }
1292
1293 static void xennet_get_stats64(struct net_device *dev,
1294                                struct rtnl_link_stats64 *tot)
1295 {
1296         struct netfront_info *np = netdev_priv(dev);
1297         int cpu;
1298
1299         for_each_possible_cpu(cpu) {
1300                 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1301                 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1302                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1303                 unsigned int start;
1304
1305                 do {
1306                         start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1307                         tx_packets = tx_stats->packets;
1308                         tx_bytes = tx_stats->bytes;
1309                 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1310
1311                 do {
1312                         start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1313                         rx_packets = rx_stats->packets;
1314                         rx_bytes = rx_stats->bytes;
1315                 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1316
1317                 tot->rx_packets += rx_packets;
1318                 tot->tx_packets += tx_packets;
1319                 tot->rx_bytes   += rx_bytes;
1320                 tot->tx_bytes   += tx_bytes;
1321         }
1322
1323         tot->rx_errors  = dev->stats.rx_errors;
1324         tot->tx_dropped = dev->stats.tx_dropped;
1325 }
1326
1327 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1328 {
1329         struct sk_buff *skb;
1330         int i;
1331
1332         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1333                 /* Skip over entries which are actually freelist references */
1334                 if (!queue->tx_skbs[i])
1335                         continue;
1336
1337                 skb = queue->tx_skbs[i];
1338                 queue->tx_skbs[i] = NULL;
1339                 get_page(queue->grant_tx_page[i]);
1340                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1341                                           GNTMAP_readonly,
1342                                           (unsigned long)page_address(queue->grant_tx_page[i]));
1343                 queue->grant_tx_page[i] = NULL;
1344                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1345                 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1346                 dev_kfree_skb_irq(skb);
1347         }
1348 }
1349
1350 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1351 {
1352         int id, ref;
1353
1354         spin_lock_bh(&queue->rx_lock);
1355
1356         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1357                 struct sk_buff *skb;
1358                 struct page *page;
1359
1360                 skb = queue->rx_skbs[id];
1361                 if (!skb)
1362                         continue;
1363
1364                 ref = queue->grant_rx_ref[id];
1365                 if (ref == GRANT_INVALID_REF)
1366                         continue;
1367
1368                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1369
1370                 /* gnttab_end_foreign_access() needs a page ref until
1371                  * foreign access is ended (which may be deferred).
1372                  */
1373                 get_page(page);
1374                 gnttab_end_foreign_access(ref, 0,
1375                                           (unsigned long)page_address(page));
1376                 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1377
1378                 kfree_skb(skb);
1379         }
1380
1381         spin_unlock_bh(&queue->rx_lock);
1382 }
1383
1384 static netdev_features_t xennet_fix_features(struct net_device *dev,
1385         netdev_features_t features)
1386 {
1387         struct netfront_info *np = netdev_priv(dev);
1388
1389         if (features & NETIF_F_SG &&
1390             !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1391                 features &= ~NETIF_F_SG;
1392
1393         if (features & NETIF_F_IPV6_CSUM &&
1394             !xenbus_read_unsigned(np->xbdev->otherend,
1395                                   "feature-ipv6-csum-offload", 0))
1396                 features &= ~NETIF_F_IPV6_CSUM;
1397
1398         if (features & NETIF_F_TSO &&
1399             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1400                 features &= ~NETIF_F_TSO;
1401
1402         if (features & NETIF_F_TSO6 &&
1403             !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1404                 features &= ~NETIF_F_TSO6;
1405
1406         return features;
1407 }
1408
1409 static int xennet_set_features(struct net_device *dev,
1410         netdev_features_t features)
1411 {
1412         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1413                 netdev_info(dev, "Reducing MTU because no SG offload");
1414                 dev->mtu = ETH_DATA_LEN;
1415         }
1416
1417         return 0;
1418 }
1419
1420 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1421 {
1422         struct netfront_queue *queue = dev_id;
1423         unsigned long flags;
1424
1425         if (queue->info->broken)
1426                 return IRQ_HANDLED;
1427
1428         spin_lock_irqsave(&queue->tx_lock, flags);
1429         xennet_tx_buf_gc(queue);
1430         spin_unlock_irqrestore(&queue->tx_lock, flags);
1431
1432         return IRQ_HANDLED;
1433 }
1434
1435 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1436 {
1437         struct netfront_queue *queue = dev_id;
1438         struct net_device *dev = queue->info->netdev;
1439
1440         if (queue->info->broken)
1441                 return IRQ_HANDLED;
1442
1443         if (likely(netif_carrier_ok(dev) &&
1444                    RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1445                 napi_schedule(&queue->napi);
1446
1447         return IRQ_HANDLED;
1448 }
1449
1450 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1451 {
1452         xennet_tx_interrupt(irq, dev_id);
1453         xennet_rx_interrupt(irq, dev_id);
1454         return IRQ_HANDLED;
1455 }
1456
1457 #ifdef CONFIG_NET_POLL_CONTROLLER
1458 static void xennet_poll_controller(struct net_device *dev)
1459 {
1460         /* Poll each queue */
1461         struct netfront_info *info = netdev_priv(dev);
1462         unsigned int num_queues = dev->real_num_tx_queues;
1463         unsigned int i;
1464
1465         if (info->broken)
1466                 return;
1467
1468         for (i = 0; i < num_queues; ++i)
1469                 xennet_interrupt(0, &info->queues[i]);
1470 }
1471 #endif
1472
1473 #define NETBACK_XDP_HEADROOM_DISABLE    0
1474 #define NETBACK_XDP_HEADROOM_ENABLE     1
1475
1476 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1477 {
1478         int err;
1479         unsigned short headroom;
1480
1481         headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1482         err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1483                             "xdp-headroom", "%hu",
1484                             headroom);
1485         if (err)
1486                 pr_warn("Error writing xdp-headroom\n");
1487
1488         return err;
1489 }
1490
1491 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1492                           struct netlink_ext_ack *extack)
1493 {
1494         unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1495         struct netfront_info *np = netdev_priv(dev);
1496         struct bpf_prog *old_prog;
1497         unsigned int i, err;
1498
1499         if (dev->mtu > max_mtu) {
1500                 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1501                 return -EINVAL;
1502         }
1503
1504         if (!np->netback_has_xdp_headroom)
1505                 return 0;
1506
1507         xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1508
1509         err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1510                                   NETBACK_XDP_HEADROOM_DISABLE);
1511         if (err)
1512                 return err;
1513
1514         /* avoid the race with XDP headroom adjustment */
1515         wait_event(module_wq,
1516                    xenbus_read_driver_state(np->xbdev->otherend) ==
1517                    XenbusStateReconfigured);
1518         np->netfront_xdp_enabled = true;
1519
1520         old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1521
1522         if (prog)
1523                 bpf_prog_add(prog, dev->real_num_tx_queues);
1524
1525         for (i = 0; i < dev->real_num_tx_queues; ++i)
1526                 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1527
1528         if (old_prog)
1529                 for (i = 0; i < dev->real_num_tx_queues; ++i)
1530                         bpf_prog_put(old_prog);
1531
1532         xenbus_switch_state(np->xbdev, XenbusStateConnected);
1533
1534         return 0;
1535 }
1536
1537 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1538 {
1539         struct netfront_info *np = netdev_priv(dev);
1540
1541         if (np->broken)
1542                 return -ENODEV;
1543
1544         switch (xdp->command) {
1545         case XDP_SETUP_PROG:
1546                 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1547         default:
1548                 return -EINVAL;
1549         }
1550 }
1551
1552 static const struct net_device_ops xennet_netdev_ops = {
1553         .ndo_open            = xennet_open,
1554         .ndo_stop            = xennet_close,
1555         .ndo_start_xmit      = xennet_start_xmit,
1556         .ndo_change_mtu      = xennet_change_mtu,
1557         .ndo_get_stats64     = xennet_get_stats64,
1558         .ndo_set_mac_address = eth_mac_addr,
1559         .ndo_validate_addr   = eth_validate_addr,
1560         .ndo_fix_features    = xennet_fix_features,
1561         .ndo_set_features    = xennet_set_features,
1562         .ndo_select_queue    = xennet_select_queue,
1563         .ndo_bpf             = xennet_xdp,
1564         .ndo_xdp_xmit        = xennet_xdp_xmit,
1565 #ifdef CONFIG_NET_POLL_CONTROLLER
1566         .ndo_poll_controller = xennet_poll_controller,
1567 #endif
1568 };
1569
1570 static void xennet_free_netdev(struct net_device *netdev)
1571 {
1572         struct netfront_info *np = netdev_priv(netdev);
1573
1574         free_percpu(np->rx_stats);
1575         free_percpu(np->tx_stats);
1576         free_netdev(netdev);
1577 }
1578
1579 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1580 {
1581         int err;
1582         struct net_device *netdev;
1583         struct netfront_info *np;
1584
1585         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1586         if (!netdev)
1587                 return ERR_PTR(-ENOMEM);
1588
1589         np                   = netdev_priv(netdev);
1590         np->xbdev            = dev;
1591
1592         np->queues = NULL;
1593
1594         err = -ENOMEM;
1595         np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1596         if (np->rx_stats == NULL)
1597                 goto exit;
1598         np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1599         if (np->tx_stats == NULL)
1600                 goto exit;
1601
1602         netdev->netdev_ops      = &xennet_netdev_ops;
1603
1604         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1605                                   NETIF_F_GSO_ROBUST;
1606         netdev->hw_features     = NETIF_F_SG |
1607                                   NETIF_F_IPV6_CSUM |
1608                                   NETIF_F_TSO | NETIF_F_TSO6;
1609
1610         /*
1611          * Assume that all hw features are available for now. This set
1612          * will be adjusted by the call to netdev_update_features() in
1613          * xennet_connect() which is the earliest point where we can
1614          * negotiate with the backend regarding supported features.
1615          */
1616         netdev->features |= netdev->hw_features;
1617
1618         netdev->ethtool_ops = &xennet_ethtool_ops;
1619         netdev->min_mtu = ETH_MIN_MTU;
1620         netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1621         SET_NETDEV_DEV(netdev, &dev->dev);
1622
1623         np->netdev = netdev;
1624         np->netfront_xdp_enabled = false;
1625
1626         netif_carrier_off(netdev);
1627
1628         do {
1629                 xenbus_switch_state(dev, XenbusStateInitialising);
1630                 err = wait_event_timeout(module_wq,
1631                                  xenbus_read_driver_state(dev->otherend) !=
1632                                  XenbusStateClosed &&
1633                                  xenbus_read_driver_state(dev->otherend) !=
1634                                  XenbusStateUnknown, XENNET_TIMEOUT);
1635         } while (!err);
1636
1637         return netdev;
1638
1639  exit:
1640         xennet_free_netdev(netdev);
1641         return ERR_PTR(err);
1642 }
1643
1644 /*
1645  * Entry point to this code when a new device is created.  Allocate the basic
1646  * structures and the ring buffers for communication with the backend, and
1647  * inform the backend of the appropriate details for those.
1648  */
1649 static int netfront_probe(struct xenbus_device *dev,
1650                           const struct xenbus_device_id *id)
1651 {
1652         int err;
1653         struct net_device *netdev;
1654         struct netfront_info *info;
1655
1656         netdev = xennet_create_dev(dev);
1657         if (IS_ERR(netdev)) {
1658                 err = PTR_ERR(netdev);
1659                 xenbus_dev_fatal(dev, err, "creating netdev");
1660                 return err;
1661         }
1662
1663         info = netdev_priv(netdev);
1664         dev_set_drvdata(&dev->dev, info);
1665 #ifdef CONFIG_SYSFS
1666         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1667 #endif
1668
1669         return 0;
1670 }
1671
1672 static void xennet_end_access(int ref, void *page)
1673 {
1674         /* This frees the page as a side-effect */
1675         if (ref != GRANT_INVALID_REF)
1676                 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1677 }
1678
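/*
 * Undo everything negotiated with the backend: unbind the event channel
 * interrupt handlers, release the Tx/Rx buffers and their grant references,
 * revoke access to the shared rings and destroy the per-queue page pools.
 */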
1679 static void xennet_disconnect_backend(struct netfront_info *info)
1680 {
1681         unsigned int i = 0;
1682         unsigned int num_queues = info->netdev->real_num_tx_queues;
1683
1684         netif_carrier_off(info->netdev);
1685
1686         for (i = 0; i < num_queues && info->queues; ++i) {
1687                 struct netfront_queue *queue = &info->queues[i];
1688
1689                 del_timer_sync(&queue->rx_refill_timer);
1690
1691                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1692                         unbind_from_irqhandler(queue->tx_irq, queue);
1693                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1694                         unbind_from_irqhandler(queue->tx_irq, queue);
1695                         unbind_from_irqhandler(queue->rx_irq, queue);
1696                 }
1697                 queue->tx_evtchn = queue->rx_evtchn = 0;
1698                 queue->tx_irq = queue->rx_irq = 0;
1699
1700                 if (netif_running(info->netdev))
1701                         napi_synchronize(&queue->napi);
1702
1703                 xennet_release_tx_bufs(queue);
1704                 xennet_release_rx_bufs(queue);
1705                 gnttab_free_grant_references(queue->gref_tx_head);
1706                 gnttab_free_grant_references(queue->gref_rx_head);
1707
1708                 /* End access and free the pages */
1709                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1710                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1711
1712                 queue->tx_ring_ref = GRANT_INVALID_REF;
1713                 queue->rx_ring_ref = GRANT_INVALID_REF;
1714                 queue->tx.sring = NULL;
1715                 queue->rx.sring = NULL;
1716
1717                 page_pool_destroy(queue->page_pool);
1718         }
1719 }
1720
1721 /*
1722  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1723  * driver restart.  We tear down our netif structure and recreate it, but
1724  * leave the device-layer structures intact so that this is transparent to the
1725  * rest of the kernel.
1726  */
1727 static int netfront_resume(struct xenbus_device *dev)
1728 {
1729         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1730
1731         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1732
1733         xennet_disconnect_backend(info);
1734         return 0;
1735 }
1736
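/*
 * Read the device's "mac" node and parse it as six colon-separated hex
 * octets (e.g. "00:16:3e:12:34:56").
 */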
1737 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1738 {
1739         char *s, *e, *macstr;
1740         int i;
1741
1742         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1743         if (IS_ERR(macstr))
1744                 return PTR_ERR(macstr);
1745
1746         for (i = 0; i < ETH_ALEN; i++) {
1747                 mac[i] = simple_strtoul(s, &e, 16);
1748                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1749                         kfree(macstr);
1750                         return -ENOENT;
1751                 }
1752                 s = e+1;
1753         }
1754
1755         kfree(macstr);
1756         return 0;
1757 }
1758
1759 static int setup_netfront_single(struct netfront_queue *queue)
1760 {
1761         int err;
1762
1763         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1764         if (err < 0)
1765                 goto fail;
1766
1767         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1768                                         xennet_interrupt,
1769                                         0, queue->info->netdev->name, queue);
1770         if (err < 0)
1771                 goto bind_fail;
1772         queue->rx_evtchn = queue->tx_evtchn;
1773         queue->rx_irq = queue->tx_irq = err;
1774
1775         return 0;
1776
1777 bind_fail:
1778         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1779         queue->tx_evtchn = 0;
1780 fail:
1781         return err;
1782 }
1783
1784 static int setup_netfront_split(struct netfront_queue *queue)
1785 {
1786         int err;
1787
1788         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1789         if (err < 0)
1790                 goto fail;
1791         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1792         if (err < 0)
1793                 goto alloc_rx_evtchn_fail;
1794
1795         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1796                  "%s-tx", queue->name);
1797         err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1798                                         xennet_tx_interrupt,
1799                                         0, queue->tx_irq_name, queue);
1800         if (err < 0)
1801                 goto bind_tx_fail;
1802         queue->tx_irq = err;
1803
1804         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1805                  "%s-rx", queue->name);
1806         err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1807                                         xennet_rx_interrupt,
1808                                         0, queue->rx_irq_name, queue);
1809         if (err < 0)
1810                 goto bind_rx_fail;
1811         queue->rx_irq = err;
1812
1813         return 0;
1814
1815 bind_rx_fail:
1816         unbind_from_irqhandler(queue->tx_irq, queue);
1817         queue->tx_irq = 0;
1818 bind_tx_fail:
1819         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1820         queue->rx_evtchn = 0;
1821 alloc_rx_evtchn_fail:
1822         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1823         queue->tx_evtchn = 0;
1824 fail:
1825         return err;
1826 }
1827
1828 static int setup_netfront(struct xenbus_device *dev,
1829                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1830 {
1831         struct xen_netif_tx_sring *txs;
1832         struct xen_netif_rx_sring *rxs;
1833         grant_ref_t gref;
1834         int err;
1835
1836         queue->tx_ring_ref = GRANT_INVALID_REF;
1837         queue->rx_ring_ref = GRANT_INVALID_REF;
1838         queue->rx.sring = NULL;
1839         queue->tx.sring = NULL;
1840
1841         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1842         if (!txs) {
1843                 err = -ENOMEM;
1844                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1845                 goto fail;
1846         }
1847         SHARED_RING_INIT(txs);
1848         FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1849
1850         err = xenbus_grant_ring(dev, txs, 1, &gref);
1851         if (err < 0)
1852                 goto grant_tx_ring_fail;
1853         queue->tx_ring_ref = gref;
1854
1855         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1856         if (!rxs) {
1857                 err = -ENOMEM;
1858                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1859                 goto alloc_rx_ring_fail;
1860         }
1861         SHARED_RING_INIT(rxs);
1862         FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1863
1864         err = xenbus_grant_ring(dev, rxs, 1, &gref);
1865         if (err < 0)
1866                 goto grant_rx_ring_fail;
1867         queue->rx_ring_ref = gref;
1868
1869         if (feature_split_evtchn)
1870                 err = setup_netfront_split(queue);
1871         /* Set up a single event channel if
1872          *  a) feature-split-event-channels == 0, or
1873          *  b) feature-split-event-channels == 1 but the split setup failed.
1874          */
1875         if (!feature_split_evtchn || err)
1876                 err = setup_netfront_single(queue);
1877
1878         if (err)
1879                 goto alloc_evtchn_fail;
1880
1881         return 0;
1882
1883         /* If we fail to set up netfront, it is safe to just revoke access
1884          * to the granted pages because the backend is not accessing them yet.
1885          */
1886 alloc_evtchn_fail:
1887         gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1888 grant_rx_ring_fail:
1889         free_page((unsigned long)rxs);
1890 alloc_rx_ring_fail:
1891         gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1892 grant_tx_ring_fail:
1893         free_page((unsigned long)txs);
1894 fail:
1895         return err;
1896 }
1897
1898 /* Queue-specific initialisation
1899  * This used to be done in xennet_create_dev() but must now
1900  * be run per-queue.
1901  */
1902 static int xennet_init_queue(struct netfront_queue *queue)
1903 {
1904         unsigned short i;
1905         int err = 0;
1906         char *devid;
1907
1908         spin_lock_init(&queue->tx_lock);
1909         spin_lock_init(&queue->rx_lock);
1910
1911         timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1912
1913         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1914         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1915                  devid, queue->id);
1916
1917         /* Initialise tx_skb_freelist as a free chain containing every entry. */
1918         queue->tx_skb_freelist = 0;
1919         queue->tx_pend_queue = TX_LINK_NONE;
1920         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1921                 queue->tx_link[i] = i + 1;
1922                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1923                 queue->grant_tx_page[i] = NULL;
1924         }
1925         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
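        /* i.e. 0 -> 1 -> ... -> NET_TX_RING_SIZE - 1 -> TX_LINK_NONE */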
1926
1927         /* Clear out rx_skbs */
1928         for (i = 0; i < NET_RX_RING_SIZE; i++) {
1929                 queue->rx_skbs[i] = NULL;
1930                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1931         }
1932
1933         /* A grant for every tx ring slot */
1934         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1935                                           &queue->gref_tx_head) < 0) {
1936                 pr_alert("can't alloc tx grant refs\n");
1937                 err = -ENOMEM;
1938                 goto exit;
1939         }
1940
1941         /* A grant for every rx ring slot */
1942         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1943                                           &queue->gref_rx_head) < 0) {
1944                 pr_alert("can't alloc rx grant refs\n");
1945                 err = -ENOMEM;
1946                 goto exit_free_tx;
1947         }
1948
1949         return 0;
1950
1951  exit_free_tx:
1952         gnttab_free_grant_references(queue->gref_tx_head);
1953  exit:
1954         return err;
1955 }
1956
1957 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1958                            struct xenbus_transaction *xbt, int write_hierarchical)
1959 {
1960         /* Write the queue-specific keys into XenStore in the traditional
1961          * way for a single queue, or under per-queue subkeys when there
1962          * are multiple queues.
1963          */
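        /*
         * For example, a single queue writes <nodename>/tx-ring-ref directly,
         * while multiple queues write <nodename>/queue-<id>/tx-ring-ref.
         */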
1964         struct xenbus_device *dev = queue->info->xbdev;
1965         int err;
1966         const char *message;
1967         char *path;
1968         size_t pathsize;
1969
1970         /* Choose the correct place to write the keys */
1971         if (write_hierarchical) {
1972                 pathsize = strlen(dev->nodename) + 10;
1973                 path = kzalloc(pathsize, GFP_KERNEL);
1974                 if (!path) {
1975                         err = -ENOMEM;
1976                         message = "out of memory while writing ring references";
1977                         goto error;
1978                 }
1979                 snprintf(path, pathsize, "%s/queue-%u",
1980                                 dev->nodename, queue->id);
1981         } else {
1982                 path = (char *)dev->nodename;
1983         }
1984
1985         /* Write ring references */
1986         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1987                         queue->tx_ring_ref);
1988         if (err) {
1989                 message = "writing tx-ring-ref";
1990                 goto error;
1991         }
1992
1993         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1994                         queue->rx_ring_ref);
1995         if (err) {
1996                 message = "writing rx-ring-ref";
1997                 goto error;
1998         }
1999
2000         /* Write the event channels, taking into account both the shared
2001          * and the split event channel scenarios.
2002          */
2003         if (queue->tx_evtchn == queue->rx_evtchn) {
2004                 /* Shared event channel */
2005                 err = xenbus_printf(*xbt, path,
2006                                 "event-channel", "%u", queue->tx_evtchn);
2007                 if (err) {
2008                         message = "writing event-channel";
2009                         goto error;
2010                 }
2011         } else {
2012                 /* Split event channels */
2013                 err = xenbus_printf(*xbt, path,
2014                                 "event-channel-tx", "%u", queue->tx_evtchn);
2015                 if (err) {
2016                         message = "writing event-channel-tx";
2017                         goto error;
2018                 }
2019
2020                 err = xenbus_printf(*xbt, path,
2021                                 "event-channel-rx", "%u", queue->rx_evtchn);
2022                 if (err) {
2023                         message = "writing event-channel-rx";
2024                         goto error;
2025                 }
2026         }
2027
2028         if (write_hierarchical)
2029                 kfree(path);
2030         return 0;
2031
2032 error:
2033         if (write_hierarchical)
2034                 kfree(path);
2035         xenbus_dev_fatal(dev, err, "%s", message);
2036         return err;
2037 }
2038
2039 static void xennet_destroy_queues(struct netfront_info *info)
2040 {
2041         unsigned int i;
2042
2043         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
2044                 struct netfront_queue *queue = &info->queues[i];
2045
2046                 if (netif_running(info->netdev))
2047                         napi_disable(&queue->napi);
2048                 netif_napi_del(&queue->napi);
2049         }
2050
2051         kfree(info->queues);
2052         info->queues = NULL;
2053 }
2054
2055
2056
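/*
 * Create the Rx page pool for a queue and register it as the memory model
 * of the queue's xdp_rxq, reserving XDP_PACKET_HEADROOM bytes in front of
 * every buffer.
 */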
2057 static int xennet_create_page_pool(struct netfront_queue *queue)
2058 {
2059         int err;
2060         struct page_pool_params pp_params = {
2061                 .order = 0,
2062                 .flags = 0,
2063                 .pool_size = NET_RX_RING_SIZE,
2064                 .nid = NUMA_NO_NODE,
2065                 .dev = &queue->info->netdev->dev,
2066                 .offset = XDP_PACKET_HEADROOM,
2067                 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2068         };
2069
2070         queue->page_pool = page_pool_create(&pp_params);
2071         if (IS_ERR(queue->page_pool)) {
2072                 err = PTR_ERR(queue->page_pool);
2073                 queue->page_pool = NULL;
2074                 return err;
2075         }
2076
2077         err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2078                                queue->id, 0);
2079         if (err) {
2080                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2081                 goto err_free_pp;
2082         }
2083
2084         err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2085                                          MEM_TYPE_PAGE_POOL, queue->page_pool);
2086         if (err) {
2087                 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2088                 goto err_unregister_rxq;
2089         }
2090         return 0;
2091
2092 err_unregister_rxq:
2093         xdp_rxq_info_unreg(&queue->xdp_rxq);
2094 err_free_pp:
2095         page_pool_destroy(queue->page_pool);
2096         queue->page_pool = NULL;
2097         return err;
2098 }
2099
2100 static int xennet_create_queues(struct netfront_info *info,
2101                                 unsigned int *num_queues)
2102 {
2103         unsigned int i;
2104         int ret;
2105
2106         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2107                                GFP_KERNEL);
2108         if (!info->queues)
2109                 return -ENOMEM;
2110
2111         for (i = 0; i < *num_queues; i++) {
2112                 struct netfront_queue *queue = &info->queues[i];
2113
2114                 queue->id = i;
2115                 queue->info = info;
2116
2117                 ret = xennet_init_queue(queue);
2118                 if (ret < 0) {
2119                         dev_warn(&info->xbdev->dev,
2120                                  "only created %u queues\n", i);
2121                         *num_queues = i;
2122                         break;
2123                 }
2124
2125                 /* use page pool recycling instead of buddy allocator */
2126                 ret = xennet_create_page_pool(queue);
2127                 if (ret < 0) {
2128                         dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2129                         *num_queues = i;
2130                         return ret;
2131                 }
2132
2133                 netif_napi_add(queue->info->netdev, &queue->napi,
2134                                xennet_poll, 64);
2135                 if (netif_running(info->netdev))
2136                         napi_enable(&queue->napi);
2137         }
2138
2139         netif_set_real_num_tx_queues(info->netdev, *num_queues);
2140
2141         if (*num_queues == 0) {
2142                 dev_err(&info->xbdev->dev, "no queues\n");
2143                 return -EINVAL;
2144         }
2145         return 0;
2146 }
2147
2148 /* Common code used when first setting up, and when resuming. */
2149 static int talk_to_netback(struct xenbus_device *dev,
2150                            struct netfront_info *info)
2151 {
2152         const char *message;
2153         struct xenbus_transaction xbt;
2154         int err;
2155         unsigned int feature_split_evtchn;
2156         unsigned int i = 0;
2157         unsigned int max_queues = 0;
2158         struct netfront_queue *queue = NULL;
2159         unsigned int num_queues = 1;
2160
2161         info->netdev->irq = 0;
2162
2163         /* Check if backend supports multiple queues */
2164         max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2165                                           "multi-queue-max-queues", 1);
2166         num_queues = min(max_queues, xennet_max_queues);
2167
2168         /* Check feature-split-event-channels */
2169         feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2170                                         "feature-split-event-channels", 0);
2171
2172         /* Read mac addr. */
2173         err = xen_net_read_mac(dev, info->netdev->dev_addr);
2174         if (err) {
2175                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2176                 goto out_unlocked;
2177         }
2178
2179         info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2180                                                               "feature-xdp-headroom", 0);
2181         if (info->netback_has_xdp_headroom) {
2182                 /* set the current xen-netfront xdp state */
2183                 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2184                                           NETBACK_XDP_HEADROOM_ENABLE :
2185                                           NETBACK_XDP_HEADROOM_DISABLE);
2186                 if (err)
2187                         goto out_unlocked;
2188         }
2189
2190         rtnl_lock();
2191         if (info->queues)
2192                 xennet_destroy_queues(info);
2193
2194         /* In the case of a reconnect, reset the "broken" indicator. */
2195         info->broken = false;
2196
2197         err = xennet_create_queues(info, &num_queues);
2198         if (err < 0) {
2199                 xenbus_dev_fatal(dev, err, "creating queues");
2200                 kfree(info->queues);
2201                 info->queues = NULL;
2202                 goto out;
2203         }
2204         rtnl_unlock();
2205
2206         /* Create shared ring, alloc event channel -- for each queue */
2207         for (i = 0; i < num_queues; ++i) {
2208                 queue = &info->queues[i];
2209                 err = setup_netfront(dev, queue, feature_split_evtchn);
2210                 if (err)
2211                         goto destroy_ring;
2212         }
2213
2214 again:
2215         err = xenbus_transaction_start(&xbt);
2216         if (err) {
2217                 xenbus_dev_fatal(dev, err, "starting transaction");
2218                 goto destroy_ring;
2219         }
2220
2221         if (xenbus_exists(XBT_NIL,
2222                           info->xbdev->otherend, "multi-queue-max-queues")) {
2223                 /* Write the number of queues */
2224                 err = xenbus_printf(xbt, dev->nodename,
2225                                     "multi-queue-num-queues", "%u", num_queues);
2226                 if (err) {
2227                         message = "writing multi-queue-num-queues";
2228                         goto abort_transaction_no_dev_fatal;
2229                 }
2230         }
2231
2232         if (num_queues == 1) {
2233                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2234                 if (err)
2235                         goto abort_transaction_no_dev_fatal;
2236         } else {
2237                 /* Write the keys for each queue */
2238                 for (i = 0; i < num_queues; ++i) {
2239                         queue = &info->queues[i];
2240                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2241                         if (err)
2242                                 goto abort_transaction_no_dev_fatal;
2243                 }
2244         }
2245
2246         /* The remaining keys are not queue-specific */
2247         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2248                             1);
2249         if (err) {
2250                 message = "writing request-rx-copy";
2251                 goto abort_transaction;
2252         }
2253
2254         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2255         if (err) {
2256                 message = "writing feature-rx-notify";
2257                 goto abort_transaction;
2258         }
2259
2260         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2261         if (err) {
2262                 message = "writing feature-sg";
2263                 goto abort_transaction;
2264         }
2265
2266         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2267         if (err) {
2268                 message = "writing feature-gso-tcpv4";
2269                 goto abort_transaction;
2270         }
2271
2272         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2273         if (err) {
2274                 message = "writing feature-gso-tcpv6";
2275                 goto abort_transaction;
2276         }
2277
2278         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2279                            "1");
2280         if (err) {
2281                 message = "writing feature-ipv6-csum-offload";
2282                 goto abort_transaction;
2283         }
2284
2285         err = xenbus_transaction_end(xbt, 0);
2286         if (err) {
2287                 if (err == -EAGAIN)
2288                         goto again;
2289                 xenbus_dev_fatal(dev, err, "completing transaction");
2290                 goto destroy_ring;
2291         }
2292
2293         return 0;
2294
2295  abort_transaction:
2296         xenbus_dev_fatal(dev, err, "%s", message);
2297 abort_transaction_no_dev_fatal:
2298         xenbus_transaction_end(xbt, 1);
2299  destroy_ring:
2300         xennet_disconnect_backend(info);
2301         rtnl_lock();
2302         xennet_destroy_queues(info);
2303  out:
2304         rtnl_unlock();
2305 out_unlocked:
2306         device_unregister(&dev->dev);
2307         return err;
2308 }
2309
2310 static int xennet_connect(struct net_device *dev)
2311 {
2312         struct netfront_info *np = netdev_priv(dev);
2313         unsigned int num_queues = 0;
2314         int err;
2315         unsigned int j = 0;
2316         struct netfront_queue *queue = NULL;
2317
2318         if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2319                 dev_info(&dev->dev,
2320                          "backend does not support copying receive path\n");
2321                 return -ENODEV;
2322         }
2323
2324         err = talk_to_netback(np->xbdev, np);
2325         if (err)
2326                 return err;
2327         if (np->netback_has_xdp_headroom)
2328                 pr_info("backend supports XDP headroom\n");
2329
2330         /* talk_to_netback() sets the correct number of queues */
2331         num_queues = dev->real_num_tx_queues;
2332
2333         if (dev->reg_state == NETREG_UNINITIALIZED) {
2334                 err = register_netdev(dev);
2335                 if (err) {
2336                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2337                         device_unregister(&np->xbdev->dev);
2338                         return err;
2339                 }
2340         }
2341
2342         rtnl_lock();
2343         netdev_update_features(dev);
2344         rtnl_unlock();
2345
2346         /*
2347          * All public and private state should now be sane.  Get
2348          * ready to start sending and receiving packets and give the driver
2349          * domain a kick because we've probably just requeued some
2350          * packets.
2351          */
2352         netif_carrier_on(np->netdev);
2353         for (j = 0; j < num_queues; ++j) {
2354                 queue = &np->queues[j];
2355
2356                 notify_remote_via_irq(queue->tx_irq);
2357                 if (queue->tx_irq != queue->rx_irq)
2358                         notify_remote_via_irq(queue->rx_irq);
2359
2360                 spin_lock_irq(&queue->tx_lock);
2361                 xennet_tx_buf_gc(queue);
2362                 spin_unlock_irq(&queue->tx_lock);
2363
2364                 spin_lock_bh(&queue->rx_lock);
2365                 xennet_alloc_rx_buffers(queue);
2366                 spin_unlock_bh(&queue->rx_lock);
2367         }
2368
2369         return 0;
2370 }
2371
2372 /*
2373  * Callback received when the backend's state changes.
2374  */
2375 static void netback_changed(struct xenbus_device *dev,
2376                             enum xenbus_state backend_state)
2377 {
2378         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2379         struct net_device *netdev = np->netdev;
2380
2381         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2382
2383         wake_up_all(&module_wq);
2384
2385         switch (backend_state) {
2386         case XenbusStateInitialising:
2387         case XenbusStateInitialised:
2388         case XenbusStateReconfiguring:
2389         case XenbusStateReconfigured:
2390         case XenbusStateUnknown:
2391                 break;
2392
2393         case XenbusStateInitWait:
2394                 if (dev->state != XenbusStateInitialising)
2395                         break;
2396                 if (xennet_connect(netdev) != 0)
2397                         break;
2398                 xenbus_switch_state(dev, XenbusStateConnected);
2399                 break;
2400
2401         case XenbusStateConnected:
2402                 netdev_notify_peers(netdev);
2403                 break;
2404
2405         case XenbusStateClosed:
2406                 if (dev->state == XenbusStateClosed)
2407                         break;
2408                 fallthrough;    /* Missed the backend's CLOSING state */
2409         case XenbusStateClosing:
2410                 xenbus_frontend_closed(dev);
2411                 break;
2412         }
2413 }
2414
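/*
 * Each entry names an ethtool statistic and stores the byte offset of the
 * corresponding atomic_t counter within struct netfront_info; the counters
 * are read straight from the private data in xennet_get_ethtool_stats().
 */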
2415 static const struct xennet_stat {
2416         char name[ETH_GSTRING_LEN];
2417         u16 offset;
2418 } xennet_stats[] = {
2419         {
2420                 "rx_gso_checksum_fixup",
2421                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2422         },
2423 };
2424
2425 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2426 {
2427         switch (string_set) {
2428         case ETH_SS_STATS:
2429                 return ARRAY_SIZE(xennet_stats);
2430         default:
2431                 return -EINVAL;
2432         }
2433 }
2434
2435 static void xennet_get_ethtool_stats(struct net_device *dev,
2436                                      struct ethtool_stats *stats, u64 *data)
2437 {
2438         void *np = netdev_priv(dev);
2439         int i;
2440
2441         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2442                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2443 }
2444
2445 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2446 {
2447         int i;
2448
2449         switch (stringset) {
2450         case ETH_SS_STATS:
2451                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2452                         memcpy(data + i * ETH_GSTRING_LEN,
2453                                xennet_stats[i].name, ETH_GSTRING_LEN);
2454                 break;
2455         }
2456 }
2457
2458 static const struct ethtool_ops xennet_ethtool_ops =
2459 {
2460         .get_link = ethtool_op_get_link,
2461
2462         .get_sset_count = xennet_get_sset_count,
2463         .get_ethtool_stats = xennet_get_ethtool_stats,
2464         .get_strings = xennet_get_strings,
2465         .get_ts_info = ethtool_op_get_ts_info,
2466 };
2467
2468 #ifdef CONFIG_SYSFS
2469 static ssize_t show_rxbuf(struct device *dev,
2470                           struct device_attribute *attr, char *buf)
2471 {
2472         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2473 }
2474
2475 static ssize_t store_rxbuf(struct device *dev,
2476                            struct device_attribute *attr,
2477                            const char *buf, size_t len)
2478 {
2479         char *endp;
2480
2481         if (!capable(CAP_NET_ADMIN))
2482                 return -EPERM;
2483
2484         simple_strtoul(buf, &endp, 0);
2485         if (endp == buf)
2486                 return -EBADMSG;
2487
2488         /* rxbuf_min and rxbuf_max are no longer configurable. */
2489
2490         return len;
2491 }
2492
2493 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2494 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2495 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2496
2497 static struct attribute *xennet_dev_attrs[] = {
2498         &dev_attr_rxbuf_min.attr,
2499         &dev_attr_rxbuf_max.attr,
2500         &dev_attr_rxbuf_cur.attr,
2501         NULL
2502 };
2503
2504 static const struct attribute_group xennet_dev_group = {
2505         .attrs = xennet_dev_attrs
2506 };
2507 #endif /* CONFIG_SYSFS */
2508
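/*
 * Walk the connection down to Closed before removal: switch the frontend to
 * Closing and wait for the backend to reach Closing/Closed/Unknown, then
 * switch to Closed and wait for the backend to follow.  Each wait is retried
 * on timeout.
 */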
2509 static void xennet_bus_close(struct xenbus_device *dev)
2510 {
2511         int ret;
2512
2513         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2514                 return;
2515         do {
2516                 xenbus_switch_state(dev, XenbusStateClosing);
2517                 ret = wait_event_timeout(module_wq,
2518                                    xenbus_read_driver_state(dev->otherend) ==
2519                                    XenbusStateClosing ||
2520                                    xenbus_read_driver_state(dev->otherend) ==
2521                                    XenbusStateClosed ||
2522                                    xenbus_read_driver_state(dev->otherend) ==
2523                                    XenbusStateUnknown,
2524                                    XENNET_TIMEOUT);
2525         } while (!ret);
2526
2527         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2528                 return;
2529
2530         do {
2531                 xenbus_switch_state(dev, XenbusStateClosed);
2532                 ret = wait_event_timeout(module_wq,
2533                                    xenbus_read_driver_state(dev->otherend) ==
2534                                    XenbusStateClosed ||
2535                                    xenbus_read_driver_state(dev->otherend) ==
2536                                    XenbusStateUnknown,
2537                                    XENNET_TIMEOUT);
2538         } while (!ret);
2539 }
2540
2541 static int xennet_remove(struct xenbus_device *dev)
2542 {
2543         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2544
2545         xennet_bus_close(dev);
2546         xennet_disconnect_backend(info);
2547
2548         if (info->netdev->reg_state == NETREG_REGISTERED)
2549                 unregister_netdev(info->netdev);
2550
2551         if (info->queues) {
2552                 rtnl_lock();
2553                 xennet_destroy_queues(info);
2554                 rtnl_unlock();
2555         }
2556         xennet_free_netdev(info->netdev);
2557
2558         return 0;
2559 }
2560
2561 static const struct xenbus_device_id netfront_ids[] = {
2562         { "vif" },
2563         { "" }
2564 };
2565
2566 static struct xenbus_driver netfront_driver = {
2567         .ids = netfront_ids,
2568         .probe = netfront_probe,
2569         .remove = xennet_remove,
2570         .resume = netfront_resume,
2571         .otherend_changed = netback_changed,
2572 };
2573
2574 static int __init netif_init(void)
2575 {
2576         if (!xen_domain())
2577                 return -ENODEV;
2578
2579         if (!xen_has_pv_nic_devices())
2580                 return -ENODEV;
2581
2582         pr_info("Initialising Xen virtual ethernet driver\n");
2583
2584         /* Allow as many queues as there are CPUs, but at most 8, if the
2585          * user has not specified a value.
2586          */
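        /* e.g. a 4-vCPU guest defaults to 4 queues, a 16-vCPU guest to 8 */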
2587         if (xennet_max_queues == 0)
2588                 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2589                                           num_online_cpus());
2590
2591         return xenbus_register_frontend(&netfront_driver);
2592 }
2593 module_init(netif_init);
2594
2595
2596 static void __exit netif_exit(void)
2597 {
2598         xenbus_unregister_driver(&netfront_driver);
2599 }
2600 module_exit(netif_exit);
2601
2602 MODULE_DESCRIPTION("Xen virtual network device frontend");
2603 MODULE_LICENSE("GPL");
2604 MODULE_ALIAS("xen:vif");
2605 MODULE_ALIAS("xennet");