/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))
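
/*
 * Illustrative arithmetic (not part of the original source): XENBUS_PAGES()
 * converts a number of Xen grants (one XEN_PAGE_SIZE page, 4 KiB, each) into
 * the number of kernel pages needed to back them.  With 4 KiB kernel pages
 * XEN_PFN_PER_PAGE is 1, so XENBUS_PAGES(8) == 8; with 64 KiB kernel pages
 * XEN_PFN_PER_PAGE is 16, so XENBUS_PAGES(8) == 1.
 */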

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;	/* HVM only. */
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed	  ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
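
/*
 * Minimal usage sketch (the callback body and the per-device "watch" field
 * are made up, not from an in-tree driver): a frontend typically watches the
 * backend's directory so it can react to state changes.
 *
 *	static void backend_changed(struct xenbus_watch *watch,
 *				    const char *path, const char *token)
 *	{
 *		// react to the other end updating its nodes
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend, &info->watch,
 *				backend_changed);
 *	if (err)
 *		return err;	// xenbus_dev_fatal() has already been called
 */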

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  Return 0 on success, or -errno on error.  On success, the
 * watched path will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
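
/*
 * Minimal usage sketch (node name and callback are illustrative): watch a
 * single node under the peer's directory, e.g. the backend's "state" node.
 *
 *	err = xenbus_watch_pathfmt(dev, &info->watch, backend_changed,
 *				   "%s/%s", dev->otherend, "state");
 *	if (err)
 *		return err;	the formatted path was already freed on error
 */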

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
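
/*
 * Typical use (sketch, not tied to any particular driver): once a driver has
 * finished setting up its rings and event channel, it advertises that it is
 * ready for the peer to connect.
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */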

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
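
/*
 * Usage sketch (the message and node name are made up): report a non-fatal
 * problem without tearing the connection down.
 *
 *	xenbus_dev_error(dev, err, "writing %s/ring-ref", dev->nodename);
 */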

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
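
/*
 * Usage sketch (made-up message): a frontend that cannot allocate its shared
 * ring gives up and lets the generic code schedule an orderly closedown.
 *
 *	ring = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 *	if (!ring) {
 *		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
 *		return -ENOMEM;
 *	}
 */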

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
		else
			gfn = virt_to_gfn(vaddr);

		err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
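
/*
 * Usage sketch (hypothetical single-page ring; the "ring-ref" node name is
 * protocol specific, not mandated here): grant the freshly allocated ring
 * page to the peer and publish the reference in the store.
 *
 *	grant_ref_t gref;
 *
 *	err = xenbus_grant_ring(dev, ring, 1, &gref);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 */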

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
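
/*
 * Usage sketch (the "event-channel" node name is illustrative): allocate an
 * unbound channel for the peer, advertise the port in the store, and give it
 * back with xenbus_free_evtchn() on error or at disconnect time.
 *
 *	evtchn_port_t port;
 *
 *	err = xenbus_alloc_evtchn(dev, &port);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "event-channel", "%u", port);
 *	if (err)
 *		xenbus_free_evtchn(dev, port);
 */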

/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and -errno on
 * error. If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
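
/*
 * Usage sketch (backend side, hypothetical single grant published by the
 * frontend under "ring-ref"): read the reference from the store, map it, and
 * later undo the mapping with xenbus_unmap_ring_vfree().
 *
 *	grant_ref_t gref;
 *	void *ring;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &gref);
 *	if (err != 1)
 *		return -EINVAL;
 *	err = xenbus_map_ring_valloc(dev, &gref, 1, &ring);
 *	if (err)
 *		return err;
 */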

/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
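
/*
 * Usage sketch: at disconnect time the backend undoes the mapping it set up
 * with xenbus_map_ring_valloc().  (info->ring is a made-up per-device field
 * holding the mapped address.)
 *
 *	if (info->ring) {
 *		xenbus_unmap_ring_vfree(dev, info->ring);
 *		info->ring = NULL;
 *	}
 */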

#ifdef CONFIG_XEN_PV
static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	int err = GNTST_okay;
	int i;
	bool leaked;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
	if (!area)
		return -ENOMEM;

	for (i = 0; i < nr_grefs; i++)
		info->phys_addrs[i] =
			arbitrary_virt_to_machine(info->ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	info->node = NULL;

	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	}
	else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
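
/*
 * Usage sketch: poll the peer's state, e.g. while waiting for the backend to
 * finish closing.  (The msleep() loop is only illustrative; real drivers
 * usually wait on a watch instead.)
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(100);
 */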

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}