/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>

#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/features.h>
#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed       ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};

	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
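
/*
 * Usage sketch (illustrative, not part of the driver API): xenbus_strstate()
 * is typically used when logging state transitions, e.g.:
 *
 *	dev_dbg(&dev->dev, "backend state changed to %s\n",
 *		xenbus_strstate(backend_state));
 *
 * where "backend_state" is whatever enum xenbus_state value the caller has
 * just read.
 */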
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional filter called before an event is queued for @callback
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
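
/*
 * Usage sketch (illustrative only; "hotplug_status_changed" and "info" are
 * hypothetical names): a driver watching a node it already named could do:
 *
 *	static void hotplug_status_changed(struct xenbus_watch *watch,
 *					   const char *path, const char *token)
 *	{
 *		... react to the changed node ...
 *	}
 *
 *	err = xenbus_watch_path(dev, path, &info->hotplug_watch, NULL,
 *				hotplug_status_changed);
 *
 * On success @path is stored in watch->node, so it must remain valid until
 * the watch is unregistered.
 */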
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional filter called before an event is queued for @callback
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as the
 * callback.  Return 0 on success, or -errno on error.  On success, the watched
 * path (generated from @pathfmt) will be saved as @watch->node, and becomes
 * the caller's to kfree().  On error, watch->node will be NULL, so the caller
 * has nothing to free, the device will switch to %XenbusStateClosing, and the
 * error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
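
/*
 * Usage sketch (illustrative only; "backend_changed" and "info" are
 * hypothetical names): watching the other end's state node is the common
 * case, with the path built from a format string:
 *
 *	err = xenbus_watch_pathfmt(dev, &info->watch, NULL, backend_changed,
 *				   "%s/state", dev->otherend);
 *
 * The formatted path ends up in watch->node and is the caller's to kfree()
 * once the watch has been unregistered.
 */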
static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}
/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
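
/*
 * Usage sketch (illustrative only): a frontend's connect path typically walks
 * through the handshake states, e.g.:
 *
 *	err = xenbus_switch_state(dev, XenbusStateInitialised);
 *	if (err)
 *		goto fail;
 *	...
 *	xenbus_switch_state(dev, XenbusStateConnected);
 *
 * The call is a no-op when dev->state already holds the requested state.
 */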
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (!path_buffer ||
	    xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
			dev->nodename, printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
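
/*
 * Usage sketch (illustrative only; "setup_ring" is a hypothetical helper):
 * a driver reports a fatal setup error and lets the core start an orderly
 * shutdown:
 *
 *	err = setup_ring(dev);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "setting up ring for %s",
 *				 dev->nodename);
 *		return err;
 *	}
 */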
/*
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoiding recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}
/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
		else
			gfn = virt_to_gfn(vaddr);

		err = gnttab_grant_foreign_access(dev->otherend_id, gfn, 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
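
/*
 * Usage sketch (illustrative only; "sring" is a hypothetical shared-ring
 * structure allocated by the caller): a frontend sharing a one-page ring
 * might do:
 *
 *	grant_ref_t gref[1];
 *
 *	err = xenbus_grant_ring(dev, sring, 1, gref);
 *	if (err < 0)
 *		goto fail;
 *
 * On failure the error has already been reported via xenbus_dev_fatal();
 * on success gref[0] is then written to the store for the peer to map.
 */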
/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
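
/*
 * Usage sketch (illustrative only; "my_interrupt" and "info" are hypothetical
 * names): allocate an unbound channel, then bind an interrupt handler to it;
 * the port is normally also written to the store for the peer:
 *
 *	int evtchn, irq;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_interrupt, 0,
 *					"my-device", info);
 *	if (irq < 0) {
 *		xenbus_free_evtchn(dev, evtchn);
 *		return irq;
 *	}
 */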
/**
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;

	err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
	/* Some hypervisors are buggy and can return 1. */
	if (err > 0)
		err = GNTST_general_error;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
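
/*
 * Usage sketch (illustrative only; "ring_ref" is assumed to have been read
 * from the store already): a backend maps the frontend's single-page ring:
 *
 *	void *addr;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr);
 *	if (err)
 *		return err;
 *
 * and later releases it with xenbus_unmap_ring_vfree(dev, addr).
 */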
/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}
struct map_ring_valloc_hvm
{
	unsigned int idx;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc_hvm *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int err;
	void *addr;
	bool leaked = false;
	struct map_ring_valloc_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     &info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info.phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_pages, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: grant handle array to be filled out
 * @vaddrs: addresses the grants should be mapped to
 * @leaked: set to true if cleanup after a failed map itself failed
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error.  If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if the mapping fails, the caller should check
 * @leaked: if it is true, xenbus_map_ring failed to clean up after itself and
 * the caller should not free the address space behind @vaddrs.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
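
/*
 * Teardown sketch (illustrative only; "info" is a hypothetical per-device
 * structure): the unmap must pair with the earlier xenbus_map_ring_valloc()
 * and normally happens in the driver's disconnect/remove path:
 *
 *	if (info->ring_addr) {
 *		xenbus_unmap_ring_vfree(dev, info->ring_addr);
 *		info->ring_addr = NULL;
 *	}
 */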
static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};
struct unmap_ring_vfree_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_vfree_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_vfree_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
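
/*
 * Usage sketch (illustrative only): a driver can poll the peer's state node
 * directly, for example while waiting for the backend to finish closing:
 *
 *	while (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		msleep(10);
 *
 * (A simplistic wait loop; real drivers usually react to a watch instead.)
 */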
static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}