/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <net/ip.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	struct page *page;
	unsigned offset;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0
#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
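/*
 * Note on sizing (informational): __RING_SIZE rounds the entry count
 * down to a power of two so that an entire ring fits in one shared
 * page; with 4 KiB pages both rings end up with 256 entries.  The
 * index masks below (e.g. in xennet_rxidx()) rely on that property.
 */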
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;
	struct net_device_stats stats;

	struct xen_netif_tx_front_ring tx;
	struct xen_netif_rx_front_ring rx;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	unsigned int evtchn;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	struct xenbus_device *xbdev;
	int tx_ring_ref;
	int rx_ring_ref;

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */
static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	list[id].link = *head;
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
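/*
 * Illustrative only: with the freelist initialised as 0 -> 1 -> 2 -> ...,
 * get_id_from_freelist() pops the current head (0) and promotes its link
 * (1) to be the new head; add_id_to_freelist(..., 0) pushes 0 back so it
 * is handed out first on the next allocation.  The ids double as the
 * 'id' field of tx ring requests.
 */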
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
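/* The mask above relies on NET_RX_RING_SIZE being a power of two. */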
static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}
static int netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}
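/*
 * Informational: a single skb can consume up to MAX_SKB_FRAGS fragment
 * slots plus extra slots for a linear header that crosses a page
 * boundary and for a GSO extra-info request, hence the headroom of
 * MAX_SKB_FRAGS + 2 slots kept above before the queue is re-enabled.
 */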
static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation
	 * requests both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}
	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;
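	/*
	 * The fill target is adaptive: it is doubled just above whenever
	 * the ring came close to running dry, and decremented in
	 * xennet_poll() when a callback arrives with few responses
	 * pending (exponential increase, linear decrease).
	 */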
 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();	/* barrier so backend sees requests */

	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	memset(&np->stats, 0, sizeof(np->stats));

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	xennet_maybe_wake_tx(dev);

	return 0;
}
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;
		/*
		 * Set a new event, then check for race with update of
		 * tx_cons. Note that it is essential to schedule a
		 * callback, no matter how few buffers are pending. Even if
		 * there is space in the transmit ring, higher layers may
		 * be blocked because too much data is outstanding: in such
		 * cases notification from Xen is likely to be the only kick
		 * that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
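		/*
		 * i.e. ask for the next event roughly halfway through the
		 * requests still in flight: with prod == 10 and
		 * req_prod == 14, rsp_event becomes 13.
		 */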
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;
		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}
	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}
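/*
 * Note: each extra slot filled by xennet_make_frags() takes its own
 * skb_get() reference, so the skb stays alive until xennet_tx_buf_gc()
 * has seen a response for every slot and dropped the matching reference
 * via dev_kfree_skb_irq().
 */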
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}
	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;
	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;
	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

 drop:
	np->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}
static struct net_device_stats *xennet_get_stats(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	return &np->stats;
}
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or
		 * in the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n",
			       gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}
static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet\n", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
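/*
 * Worked example for skb_checksum_setup(): for TCP over IPv4,
 * csum_start marks the start of the TCP header relative to skb->head,
 * and csum_offset is 16 (offsetof(struct tcphdr, check)), so whoever
 * later completes the partial checksum writes it at
 * skb->head + csum_start + csum_offset.
 */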
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	struct netfront_info *np = netdev_priv(dev);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				np->stats.rx_errors++;
				continue;
			}
		}

		np->stats.rx_packets++;
		np->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	return packets_dropped;
}
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		spin_unlock(&np->rx_lock);
		return 0;
	}

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */
	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			np->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}
		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%.
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}
	while ((skb = __skb_dequeue(&errq)))
		kfree_skb(skb);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
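/*
 * Rationale for the limit above: with scatter-gather a frame may span
 * several granted pages, so the MTU is bounded only by the 16-bit IP
 * total-length field (65535) less the Ethernet header; without
 * scatter-gather a frame must fit in a single request, so we fall back
 * to the standard ETH_DATA_LEN of 1500 bytes.
 */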
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/* Do all the remapping work and M2P updates. */
		MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
				 0, DOMID_SELF);
		mcl++;
		HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
	}

	while ((skb = __skb_dequeue(&free_list)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock_bh(&np->rx_lock);
}
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s: alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;
	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i].link = i+1;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}
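	/*
	 * After this loop entry i links to i+1, so ids pop in order
	 * 0, 1, 2, ...; the last entry links to NET_TX_RING_SIZE, a
	 * sentinel that is never popped because
	 * netfront_tx_slot_available() throttles the queue before the
	 * freelist can run empty.
	 */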
	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop            = xennet_close;
	netdev->get_stats       = xennet_get_stats;
	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->uninit          = xennet_uninit;
	netdev->change_mtu      = xennet_change_mtu;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}
static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
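/*
 * The device's xenstore 'mac' node holds the address as a string of
 * the form "xx:xx:xx:xx:xx:xx" (e.g. "00:16:3e:12:34:56");
 * xen_net_read_mac() parses it into the six dev_addr bytes.
 */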
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}
/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}
static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
					       frags->page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}
/*
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
static struct ethtool_ops xennet_ethtool_ops =
{
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = xennet_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}
static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}
static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}
static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}
static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */
static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}
static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};
static int __init netif_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (is_initial_xendomain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	if (is_initial_xendomain())
		return;

	xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);
MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");