/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* Structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Number of grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter stores the grant table address while the grant table
	 * is being set up; nr_gframes is the number of frames to map.
	 * Returning GNTST_okay means success; a negative value means
	 * failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames mapped in map_frames for grant entry
	 * status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for accessing or transferring.
	 * The ref parameter is the reference of the introduced grant
	 * entry, domid is the id of the granted domain, frame is the page
	 * frame to be granted, and flags is the status to set on the
	 * entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for accessing. The ref
	 * parameter is the reference of the grant entry whose access will
	 * be stopped; readonly is not used by this function. If the grant
	 * entry is currently mapped for reading or writing, return
	 * failure (== 0) without tearing down the grant access.
	 * Otherwise, stop the grant access for this entry and return
	 * success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer
	 * will be stopped. If the transfer has not started yet, just
	 * reclaim the grant entry and return failure (== 0). Otherwise,
	 * wait for the transfer to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

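/*
 * Worked example (illustrative, not from the original source): with 4 KiB
 * pages and a 4-byte grant_ref_t, RPP == 4096 / 4 == 1024 grant references
 * per page of the free list; with a 2-byte grant_status_t, SPP == 4096 / 2
 * == 2048 status entries per status frame.
 */
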
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}

/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

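/*
 * Illustrative sketch (not part of the original file): a typical frontend
 * grants one of its pages to a backend domain and hands the returned grant
 * reference over, e.g. via xenstore. "otherend_id" and "page" are assumed
 * to be provided by the caller:
 *
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;
 *	// advertise "ref" to the backend, e.g. through xenstore
 */
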
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref, 0);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

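/*
 * Illustrative sketch (assuming "ref" and "page" as set up in the granting
 * example above): when the frontend is done it revokes the grant. Passing
 * the page lets gnttab_add_deferred() keep it alive if the other domain
 * still has it mapped:
 *
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)page_address(page));
 */
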
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

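/*
 * Illustrative sketch: a driver that needs many references at once can
 * pre-allocate a private chain and then claim from it without taking
 * gnttab_list_lock per reference. "NR_REFS", "otherend_id" and "gfns" are
 * assumed driver-side names:
 *
 *	grant_ref_t head;
 *	int i, err = gnttab_alloc_grant_references(NR_REFS, &head);
 *
 *	if (err)
 *		return err;
 *	for (i = 0; i < NR_REFS; i++) {
 *		int ref = gnttab_claim_grant_reference(&head);
 *
 *		gnttab_grant_foreign_access_ref(ref, otherend_id, gfns[i], 0);
 *	}
 *	// ... on teardown, return any unclaimed references:
 *	gnttab_free_grant_references(head);
 */
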
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

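/*
 * Illustrative sketch: when gnttab_alloc_grant_references() fails with
 * -ENOSPC, a driver can ask to be called back once enough references have
 * been freed. "my_tx_resume" and "dev" are assumptions for illustration:
 *
 *	static struct gnttab_free_callback cb;
 *
 *	gnttab_request_free_callback(&cb, my_tx_resume, dev, NR_REFS);
 *
 * The callback runs with gnttab_list_lock held, and is removed again with
 * gnttab_cancel_free_callback(&cb) on teardown.
 */
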
static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long)gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

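/*
 * Illustrative sketch: a backend that constantly maps and unmaps foreign
 * pages can keep a small cache instead of hitting the unpopulated-page
 * allocator each time. All names except the gnttab_page_cache_* API are
 * assumptions:
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	// ... use the page for a grant mapping, then recycle it:
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, 0);
 */
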
void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
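
/*
 * Illustrative sketch (CONFIG_XEN_GRANT_DMA_ALLOC only): the caller fills a
 * struct gnttab_dma_alloc_args before the call; "dev", "nr", "pages" and
 * "frames" are assumed to be provided by the driver:
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev      = dev,
 *		.coherent = true,
 *		.nr_pages = nr,
 *		.pages    = pages,
 *		.frames   = frames,
 *	};
 *
 *	if (gnttab_dma_alloc_pages(&args))
 *		return -ENOMEM;
 *	// ... grant args.frames[] to the other domain, do the DMA ...
 *	gnttab_dma_free_pages(&args);
 */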
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

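/*
 * Illustrative sketch: building a single gnttab_copy operation that pulls
 * "len" bytes from a foreign grant into a local frame; "remote_ref",
 * "otherend_id", "local_gfn" and "len" are assumptions for illustration:
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = remote_ref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn  = local_gfn,
 *		.dest.domid   = DOMID_SELF,
 *		.len          = len,
 *		.flags        = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */
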
void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);

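/*
 * Illustrative sketch: a callback suitable for gnttab_foreach_grant_in_range()
 * that grants each XEN_PAGE_SIZE-sized chunk of a (possibly larger) kernel
 * page. "struct setup_ctx" and its fields are assumptions:
 *
 *	static void grant_one(unsigned long gfn, unsigned int offset,
 *			      unsigned int len, void *data)
 *	{
 *		struct setup_ctx *ctx = data;
 *
 *		gnttab_grant_foreign_access_ref(ctx->refs[ctx->i++],
 *						ctx->otherend_id, gfn, 0);
 *	}
 */
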
void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

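/*
 * Illustrative sketch: a backend mapping one foreign grant into a freshly
 * allocated page, using the gnttab_set_map_op()/gnttab_set_unmap_op()
 * helpers from <xen/grant_table.h>; "ref" and "otherend_id" are assumed to
 * come from the frontend:
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *	phys_addr_t addr;
 *
 *	if (gnttab_alloc_pages(1, &page))
 *		return -ENOMEM;
 *	addr = (phys_addr_t)(unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, ref, otherend_id);
 *	if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *		return -EFAULT;
 *	// ... use the mapping, then tear it down:
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 */
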
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);