/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
        struct grant_entry_v1 *v1;
        union grant_entry_v2 *v2;
        void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
        /*
         * Version of the grant interface.
         */
        unsigned int version;
        /*
         * Grant refs per grant frame.
         */
        unsigned int grefs_per_grant_frame;
        /*
         * Map a list of frames for storing grant entries. The frames
         * parameter holds the grant table frame addresses as the grant table
         * is being set up, and nr_gframes is the number of frames to map.
         * Returns GNTST_okay on success and a negative value on failure.
         */
        int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
        /*
         * Release the list of frames mapped in map_frames, including (for
         * interface version 2) the grant entry status frames.
         */
        void (*unmap_frames)(void);
        /*
         * Introduce a valid entry into the grant table, granting the frame
         * of this grant entry to a domain for access. The ref parameter is
         * the reference of the new grant entry, domid is the id of the
         * domain being granted access, frame is the page frame being
         * granted, and flags holds the status flags to set in the entry.
         */
        void (*update_entry)(grant_ref_t ref, domid_t domid,
                             unsigned long frame, unsigned flags);
        /*
         * Stop granting access through a grant entry. The ref parameter is
         * the reference of the grant entry whose access is to be revoked.
         * If the entry is currently mapped for reading or writing, return
         * failure (0) without tearing down the grant; otherwise revoke the
         * access and return success (1).
         */
        int (*end_foreign_access_ref)(grant_ref_t ref);
        /*
         * Read the frame number related to a given grant reference.
         */
        unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
        struct completion completion;
        int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

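/*
 * Grant references per page of the free list (RPP) and status entries
 * per status frame (SPP).
 */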
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
        return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

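/*
 * The free list is threaded through the grant table itself: for an unused
 * reference, gnttab_entry(ref) holds the next free reference, starting at
 * gnttab_free_head and terminated by GNTTAB_LIST_END. get_free_entries()
 * pops count references off that list, expanding the table first if it
 * does not hold enough free entries.
 */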
static int get_free_entries(unsigned count)
{
        unsigned long flags;
        int ref, rc = 0;
        grant_ref_t head;

        spin_lock_irqsave(&gnttab_list_lock, flags);

        if ((gnttab_free_count < count) &&
            ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                return rc;
        }

        ref = head = gnttab_free_head;
        gnttab_free_count -= count;
        while (count-- > 1)
                head = gnttab_entry(head);
        gnttab_free_head = gnttab_entry(head);
        gnttab_entry(head) = GNTTAB_LIST_END;

        spin_unlock_irqrestore(&gnttab_list_lock, flags);

        return ref;
}

static void do_free_callbacks(void)
{
        struct gnttab_free_callback *callback, *next;

        callback = gnttab_free_callback_list;
        gnttab_free_callback_list = NULL;

        while (callback != NULL) {
                next = callback->next;
                if (gnttab_free_count >= callback->count) {
                        callback->next = NULL;
                        callback->fn(callback->arg);
                } else {
                        callback->next = gnttab_free_callback_list;
                        gnttab_free_callback_list = callback;
                }
                callback = next;
        }
}

static inline void check_free_callbacks(void)
{
        if (unlikely(gnttab_free_callback_list))
                do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
        unsigned long flags;

        if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
                return;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = ref;
        gnttab_free_count++;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: Frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
                                   unsigned long frame, unsigned flags)
{
        gnttab_shared.v1[ref].domid = domid;
        gnttab_shared.v1[ref].frame = frame;
        wmb();
        gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
                                   unsigned long frame, unsigned int flags)
{
        gnttab_shared.v2[ref].hdr.domid = domid;
        gnttab_shared.v2[ref].full_page.frame = frame;
        wmb();  /* Hypervisor concurrent accesses. */
        gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly)
{
        gnttab_interface->update_entry(ref, domid, frame,
                           GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly)
{
        int ref;

        ref = get_free_entries(1);
        if (unlikely(ref < 0))
                return -ENOSPC;

        gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

        return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
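/*
 * Typical use by a frontend driver (a minimal sketch, not taken from this
 * file; "backend_domid" and the shared page are assumed to come from the
 * driver, e.g. via xenstore):
 *
 *      ref = gnttab_grant_foreign_access(backend_domid,
 *                                        xen_page_to_gfn(page), 0);
 *      if (ref < 0)
 *              return ref;
 *      ...advertise ref to the backend...
 *      gnttab_end_foreign_access(ref, page);
 */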

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
        u16 flags, nflags;
        u16 *pflags;

        pflags = &gnttab_shared.v1[ref].flags;
        nflags = *pflags;
        do {
                flags = nflags;
                if (flags & (GTF_reading|GTF_writing))
                        return 0;
        } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

        return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
        gnttab_shared.v2[ref].hdr.flags = 0;
        mb();   /* Concurrent access by hypervisor. */
        if (grstatus[ref] & (GTF_reading|GTF_writing)) {
                return 0;
        } else {
                /*
                 * The read of grstatus needs to have acquire semantics.
                 * On x86, reads already have that, and we just need to
                 * protect against compiler reorderings.
                 * On other architectures we may need a full barrier.
                 */
#ifdef CONFIG_X86
                barrier();
#else
                mb();
#endif
        }

        return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
        return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
        if (_gnttab_end_foreign_access_ref(ref))
                return 1;
        pr_warn("WARNING: g.e. %#x still in use!\n", ref);
        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
        return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
        return gnttab_shared.v2[ref].full_page.frame;
}

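/*
 * Deferred freeing of grant references whose remote mapping is still
 * active: such entries are parked on deferred_list together with the page
 * they cover, and a timer retries ending the access roughly once per
 * second, logging a message once 60 attempts have failed.
 */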
struct deferred_entry {
        struct list_head list;
        grant_ref_t ref;
        uint16_t warn_delay;
        struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
        unsigned int nr = 10;
        struct deferred_entry *first = NULL;
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        while (nr--) {
                struct deferred_entry *entry
                        = list_first_entry(&deferred_list,
                                           struct deferred_entry, list);

                if (entry == first)
                        break;
                list_del(&entry->list);
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                if (_gnttab_end_foreign_access_ref(entry->ref)) {
                        put_free_entry(entry->ref);
                        pr_debug("freeing g.e. %#x (pfn %#lx)\n",
                                 entry->ref, page_to_pfn(entry->page));
                        put_page(entry->page);
                        kfree(entry);
                        entry = NULL;
                } else {
                        if (!--entry->warn_delay)
                                pr_info("g.e. %#x still pending\n", entry->ref);
                        if (!first)
                                first = entry;
                }
                spin_lock_irqsave(&gnttab_list_lock, flags);
                if (entry)
                        list_add_tail(&entry->list, &deferred_list);
                else if (list_empty(&deferred_list))
                        break;
        }
        if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
                deferred_timer.expires = jiffies + HZ;
                add_timer(&deferred_timer);
        }
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
        struct deferred_entry *entry;
        gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
        const char *what = KERN_WARNING "leaking";

        entry = kmalloc(sizeof(*entry), gfp);
        if (!page) {
                unsigned long gfn = gnttab_interface->read_frame(ref);

                page = pfn_to_page(gfn_to_pfn(gfn));
                get_page(page);
        }

        if (entry) {
                unsigned long flags;

                entry->ref = ref;
                entry->page = page;
                entry->warn_delay = 60;
                spin_lock_irqsave(&gnttab_list_lock, flags);
                list_add_tail(&entry->list, &deferred_list);
                if (!timer_pending(&deferred_timer)) {
                        deferred_timer.expires = jiffies + HZ;
                        add_timer(&deferred_timer);
                }
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                what = KERN_DEBUG "deferring";
        }
        printk("%s g.e. %#x (pfn %#lx)\n",
               what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
        int ret = _gnttab_end_foreign_access_ref(ref);

        if (ret)
                put_free_entry(ref);

        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

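/*
 * End access and free the reference. If the grant is still in use by the
 * remote domain, freeing is deferred via gnttab_add_deferred() so that
 * neither the reference nor the page can be reused prematurely.
 */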
void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
        if (gnttab_try_end_foreign_access(ref)) {
                if (page)
                        put_page(page);
        } else
                gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

void gnttab_free_grant_reference(grant_ref_t ref)
{
        put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
        grant_ref_t ref;
        unsigned long flags;
        int count = 1;

        if (head == GNTTAB_LIST_END)
                return;
        spin_lock_irqsave(&gnttab_list_lock, flags);
        ref = head;
        while (gnttab_entry(ref) != GNTTAB_LIST_END) {
                ref = gnttab_entry(ref);
                count++;
        }
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = head;
        gnttab_free_count += count;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
        int h = get_free_entries(count);

        if (h < 0)
                return -ENOSPC;

        *head = h;

        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
        return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
        grant_ref_t g = *private_head;

        if (unlikely(g == GNTTAB_LIST_END))
                return -ENOSPC;
        *private_head = gnttab_entry(g);
        return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release)
{
        gnttab_entry(release) = *private_head;
        *private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
                                  void (*fn)(void *), void *arg, u16 count)
{
        unsigned long flags;
        struct gnttab_free_callback *cb;

        spin_lock_irqsave(&gnttab_list_lock, flags);

        /* Check if the callback is already on the list */
        cb = gnttab_free_callback_list;
        while (cb) {
                if (cb == callback)
                        goto out;
                cb = cb->next;
        }

        callback->fn = fn;
        callback->arg = arg;
        callback->count = count;
        callback->next = gnttab_free_callback_list;
        gnttab_free_callback_list = callback;
        check_free_callbacks();
out:
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
        struct gnttab_free_callback **pcb;
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
                if (*pcb == callback) {
                        *pcb = callback->next;
                        break;
                }
        }
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
        return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
               align;
}

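/*
 * Extend the free list when the grant table grows: allocate the extra
 * gnttab_list pages needed to index the new entries, chain the new
 * references together and splice them onto the head of the free list.
 */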
static int grow_gnttab_list(unsigned int more_frames)
{
        unsigned int new_nr_grant_frames, extra_entries, i;
        unsigned int nr_glist_frames, new_nr_glist_frames;
        unsigned int grefs_per_frame;

        grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

        new_nr_grant_frames = nr_grant_frames + more_frames;
        extra_entries = more_frames * grefs_per_frame;

        nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
        new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                if (!gnttab_list[i])
                        goto grow_nomem;
        }

        for (i = grefs_per_frame * nr_grant_frames;
             i < grefs_per_frame * new_nr_grant_frames - 1; i++)
                gnttab_entry(i) = i + 1;

        gnttab_entry(i) = gnttab_free_head;
        gnttab_free_head = grefs_per_frame * nr_grant_frames;
        gnttab_free_count += extra_entries;

        nr_grant_frames = new_nr_grant_frames;

        check_free_callbacks();

        return 0;

grow_nomem:
        while (i-- > nr_glist_frames)
                free_page((unsigned long) gnttab_list[i]);
        return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
        struct gnttab_query_size query;
        int rc;

        query.dom = DOMID_SELF;

        rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
        if ((rc < 0) || (query.status != GNTST_okay))
                return 4; /* Legacy max supported number of frames */

        return query.max_nr_frames;
}

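/*
 * Cap the hypervisor-reported limit at the value seen at boot, so that a
 * limit raised later (e.g. after migration to a host allowing more
 * frames) cannot exceed what was sized during initialization.
 */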
unsigned int gnttab_max_grant_frames(void)
{
        unsigned int xen_max = __max_nr_grant_frames();
        static unsigned int boot_max_nr_grant_frames;

        /* First time, initialize it properly. */
        if (!boot_max_nr_grant_frames)
                boot_max_nr_grant_frames = __max_nr_grant_frames();

        if (xen_max > boot_max_nr_grant_frames)
                return boot_max_nr_grant_frames;
        return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
        xen_pfn_t *pfn;
        unsigned int max_nr_gframes = __max_nr_grant_frames();
        unsigned int i;
        void *vaddr;

        if (xen_auto_xlat_grant_frames.count)
                return -EINVAL;

        vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
        if (vaddr == NULL) {
                pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
                        &addr);
                return -ENOMEM;
        }
        pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
        if (!pfn) {
                memunmap(vaddr);
                return -ENOMEM;
        }
        for (i = 0; i < max_nr_gframes; i++)
                pfn[i] = XEN_PFN_DOWN(addr) + i;

        xen_auto_xlat_grant_frames.vaddr = vaddr;
        xen_auto_xlat_grant_frames.pfn = pfn;
        xen_auto_xlat_grant_frames.count = max_nr_gframes;

        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
        if (!xen_auto_xlat_grant_frames.count)
                return;
        kfree(xen_auto_xlat_grant_frames.pfn);
        memunmap(xen_auto_xlat_grant_frames.vaddr);

        xen_auto_xlat_grant_frames.pfn = NULL;
        xen_auto_xlat_grant_frames.count = 0;
        xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

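/*
 * On 32-bit builds the foreign-owner information (domid and gref) does
 * not fit into the page's private field, so a separate xen_page_foreign
 * is allocated and hung off page_private; 64-bit builds can keep it
 * directly in page->private.
 */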
int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
                struct xen_page_foreign *foreign;

                foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
                if (!foreign)
                        return -ENOMEM;

                set_page_private(pages[i], (unsigned long)foreign);
#endif
                SetPagePrivate(pages[i]);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
        int ret;

        ret = xen_alloc_unpopulated_pages(nr_pages, pages);
        if (ret < 0)
                return ret;

        ret = gnttab_pages_set_private(nr_pages, pages);
        if (ret < 0)
                gnttab_free_pages(nr_pages, pages);

        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

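/*
 * A small cache in front of gnttab_alloc_pages()/gnttab_free_pages().
 * With CONFIG_XEN_UNPOPULATED_ALLOC the pages are ZONE_DEVICE ones, so
 * the cache is chained through page->zone_device_data; otherwise a plain
 * list linked through page->lru is used.
 */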
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
        cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
        return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
        struct page *page;

        page = cache->pages;
        cache->pages = page->zone_device_data;

        return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
        page->zone_device_data = cache->pages;
        cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
        INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
        return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
        struct page *page;

        page = list_first_entry(&cache->pages, struct page, lru);
        list_del(&page->lru);

        return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
        list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
        spin_lock_init(&cache->lock);
        cache_init(cache);
        cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);

        if (cache_empty(cache)) {
                spin_unlock_irqrestore(&cache->lock, flags);
                return gnttab_alloc_pages(1, page);
        }

        page[0] = cache_deq(cache);
        cache->num_pages--;

        spin_unlock_irqrestore(&cache->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
                           unsigned int num)
{
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&cache->lock, flags);

        for (i = 0; i < num; i++)
                cache_enq(cache, page[i]);
        cache->num_pages += num;

        spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
        struct page *page[10];
        unsigned int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&cache->lock, flags);

        while (cache->num_pages > num) {
                page[i] = cache_deq(cache);
                cache->num_pages--;
                if (++i == ARRAY_SIZE(page)) {
                        spin_unlock_irqrestore(&cache->lock, flags);
                        gnttab_free_pages(i, page);
                        i = 0;
                        spin_lock_irqsave(&cache->lock, flags);
                }
        }

        spin_unlock_irqrestore(&cache->lock, flags);

        if (i != 0)
                gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
                        kfree((void *)page_private(pages[i]));
#endif
                        ClearPagePrivate(pages[i]);
                }
        }
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
        gnttab_pages_clear_private(nr_pages, pages);
        xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

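/*
 * DMA-able grant pages: allocate a buffer with the DMA API, scrub it,
 * then drop the backing frames from our reservation so the resulting
 * gfns are free to be remapped (e.g. with foreign grants) while the
 * machine-contiguous DMA addresses stay usable by the device.
 */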
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
        unsigned long pfn, start_pfn;
        size_t size;
        int i, ret;

        size = args->nr_pages << PAGE_SHIFT;
        if (args->coherent)
                args->vaddr = dma_alloc_coherent(args->dev, size,
                                                 &args->dev_bus_addr,
                                                 GFP_KERNEL | __GFP_NOWARN);
        else
                args->vaddr = dma_alloc_wc(args->dev, size,
                                           &args->dev_bus_addr,
                                           GFP_KERNEL | __GFP_NOWARN);
        if (!args->vaddr) {
                pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
                return -ENOMEM;
        }

        start_pfn = __phys_to_pfn(args->dev_bus_addr);
        for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
                        pfn++, i++) {
                struct page *page = pfn_to_page(pfn);

                args->pages[i] = page;
                args->frames[i] = xen_page_to_gfn(page);
                xenmem_reservation_scrub_page(page);
        }

        xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

        ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
        if (ret != args->nr_pages) {
                pr_debug("Failed to decrease reservation for DMA buffer\n");
                ret = -EFAULT;
                goto fail;
        }

        ret = gnttab_pages_set_private(args->nr_pages, args->pages);
        if (ret < 0)
                goto fail;

        return 0;

fail:
        gnttab_dma_free_pages(args);
        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
        size_t size;
        int i, ret;

        gnttab_pages_clear_private(args->nr_pages, args->pages);

        for (i = 0; i < args->nr_pages; i++)
                args->frames[i] = page_to_xen_pfn(args->pages[i]);

        ret = xenmem_reservation_increase(args->nr_pages, args->frames);
        if (ret != args->nr_pages) {
                pr_debug("Failed to increase reservation for DMA buffer\n");
                ret = -EFAULT;
        } else {
                ret = 0;
        }

        xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
                                             args->frames);

        size = args->nr_pages << PAGE_SHIFT;
        if (args->coherent)
                dma_free_coherent(args->dev, size,
                                  args->vaddr, args->dev_bus_addr);
        else
                dma_free_wc(args->dev, size,
                            args->vaddr, args->dev_bus_addr);
        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
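/*
 * Retry a single grant-table operation while it returns GNTST_eagain
 * (the target frame is paged out), sleeping 1 ms, then 2 ms, and so on,
 * for up to MAX_DELAY attempts before giving up with GNTST_bad_page.
 */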
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
                                                const char *func)
{
        unsigned delay = 1;

        do {
                BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
                if (*status == GNTST_eagain)
                        msleep(delay++);
        } while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

        if (delay >= MAX_DELAY) {
                pr_err("%s: %s eagain grant\n", func, current->comm);
                *status = GNTST_bad_page;
        }
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
        struct gnttab_map_grant_ref *op;

        if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
                BUG();
        for (op = batch; op < batch + count; op++)
                if (op->status == GNTST_eagain)
                        gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
                                                &op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
        struct gnttab_copy *op;

        if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
                BUG();
        for (op = batch; op < batch + count; op++)
                if (op->status == GNTST_eagain)
                        gnttab_retry_eagain_gop(GNTTABOP_copy, op,
                                                &op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

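/*
 * Split a range within one (kernel) page into XEN_PAGE_SIZE chunks and
 * invoke fn on each. This matters when PAGE_SIZE > XEN_PAGE_SIZE (e.g.
 * 64 KiB kernel pages on arm64), where a single kernel page spans
 * several Xen grant frames.
 */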
void gnttab_foreach_grant_in_range(struct page *page,
                                   unsigned int offset,
                                   unsigned int len,
                                   xen_grant_fn_t fn,
                                   void *data)
{
        unsigned int goffset;
        unsigned int glen;
        unsigned long xen_pfn;

        len = min_t(unsigned int, PAGE_SIZE - offset, len);
        goffset = xen_offset_in_page(offset);

        xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

        while (len) {
                glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
                fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

                goffset = 0;
                xen_pfn++;
                len -= glen;
        }
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);

void gnttab_foreach_grant(struct page **pages,
                          unsigned int nr_grefs,
                          xen_grant_fn_t fn,
                          void *data)
{
        unsigned int goffset = 0;
        unsigned long xen_pfn = 0;
        unsigned int i;

        for (i = 0; i < nr_grefs; i++) {
                if ((i % XEN_PFN_PER_PAGE) == 0) {
                        xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
                        goffset = 0;
                }

                fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

                goffset += XEN_PAGE_SIZE;
                xen_pfn++;
        }
}

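/*
 * Map a batch of foreign grants, retrying GNTST_eagain entries and
 * tagging each successfully mapped page with its foreign owner, then
 * update the kernel's p2m/kmap tracking accordingly.
 */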
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count)
{
        int i, ret;

        ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
        if (ret)
                return ret;

        for (i = 0; i < count; i++) {
                switch (map_ops[i].status) {
                case GNTST_okay:
                {
                        struct xen_page_foreign *foreign;

                        SetPageForeign(pages[i]);
                        foreign = xen_page_foreign(pages[i]);
                        foreign->domid = map_ops[i].dom;
                        foreign->gref = map_ops[i].ref;
                        break;
                }

                case GNTST_no_device_space:
                        pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
                        break;

                case GNTST_eagain:
                        /* Retry eagain maps */
                        gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
                                                map_ops + i,
                                                &map_ops[i].status, __func__);
                        /* Test status in next loop iteration. */
                        i--;
                        break;

                default:
                        break;
                }
        }

        return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count)
{
        unsigned int i;
        int ret;

        ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
        if (ret)
                return ret;

        for (i = 0; i < count; i++)
                ClearPageForeign(pages[i]);

        return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

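/*
 * Asynchronous unmap: a page may still be referenced elsewhere, in which
 * case the unmap is retried from a delayed work item, with the delay
 * growing by GNTTAB_UNMAP_REFS_DELAY ms on every unsuccessful pass,
 * until all pages are held only by us.
 */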
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
        struct gntab_unmap_queue_data
                *unmap_data = container_of(work,
                                           struct gntab_unmap_queue_data,
                                           gnttab_work.work);
        if (unmap_data->age != UINT_MAX)
                unmap_data->age++;
        __gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
        int ret;
        int pc;

        for (pc = 0; pc < item->count; pc++) {
                if (page_count(item->pages[pc]) > 1) {
                        unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
                        schedule_delayed_work(&item->gnttab_work,
                                              msecs_to_jiffies(delay));
                        return;
                }
        }

        ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
                                item->pages, item->count);
        item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
        INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
        item->age = 0;

        __gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
                struct gntab_unmap_queue_data *data)
{
        struct unmap_refs_callback_data *d = data->data;

        d->result = result;
        complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
        struct unmap_refs_callback_data data;

        init_completion(&data.completion);
        item->data = &data;
        item->done = &unmap_refs_callback;
        gnttab_unmap_refs_async(item);
        wait_for_completion(&data.completion);

        return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
        return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
        int rc;

        rc = arch_gnttab_map_shared(frames, nr_gframes,
                                    gnttab_max_grant_frames(),
                                    &gnttab_shared.addr);
        BUG_ON(rc);

        return 0;
}

static void gnttab_unmap_frames_v1(void)
{
        arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
        uint64_t *sframes;
        unsigned int nr_sframes;
        struct gnttab_get_status_frames getframes;
        int rc;

        nr_sframes = nr_status_frames(nr_gframes);

        /* No need for kzalloc as it is initialized in following hypercall
         * GNTTABOP_get_status_frames.
         */
        sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
        if (!sframes)
                return -ENOMEM;

        getframes.dom        = DOMID_SELF;
        getframes.nr_frames  = nr_sframes;
        set_xen_guest_handle(getframes.frame_list, sframes);

        rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
                                       &getframes, 1);
        if (rc == -ENOSYS) {
                kfree(sframes);
                return -ENOSYS;
        }

        BUG_ON(rc || getframes.status);

        rc = arch_gnttab_map_status(sframes, nr_sframes,
                                    nr_status_frames(gnttab_max_grant_frames()),
                                    &grstatus);
        BUG_ON(rc);
        kfree(sframes);

        rc = arch_gnttab_map_shared(frames, nr_gframes,
                                    gnttab_max_grant_frames(),
                                    &gnttab_shared.addr);
        BUG_ON(rc);

        return 0;
}

static void gnttab_unmap_frames_v2(void)
{
        arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
        arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

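/*
 * Map grant table frames [start_idx, end_idx]. Auto-translated guests
 * ask Xen to place the frames at pre-reserved gpfns via
 * XENMEM_add_to_physmap; PV guests obtain the machine frames with
 * GNTTABOP_setup_table and map them through the version-specific
 * map_frames hook.
 */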
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
        struct gnttab_setup_table setup;
        xen_pfn_t *frames;
        unsigned int nr_gframes = end_idx + 1;
        int rc;

        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                struct xen_add_to_physmap xatp;
                unsigned int i = end_idx;

                rc = 0;
                BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
                /*
                 * Loop backwards, so that the first hypercall has the largest
                 * index, ensuring that the table will grow only once.
                 */
                do {
                        xatp.domid = DOMID_SELF;
                        xatp.idx = i;
                        xatp.space = XENMAPSPACE_grant_table;
                        xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
                        rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
                        if (rc != 0) {
                                pr_warn("grant table add_to_physmap failed, err=%d\n",
                                        rc);
                                break;
                        }
                } while (i-- > start_idx);

                return rc;
        }

        /* No need for kzalloc as it is initialized in following hypercall
         * GNTTABOP_setup_table.
         */
        frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
        if (!frames)
                return -ENOMEM;

        setup.dom        = DOMID_SELF;
        setup.nr_frames  = nr_gframes;
        set_xen_guest_handle(setup.frame_list, frames);

        rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
        if (rc == -ENOSYS) {
                kfree(frames);
                return -ENOSYS;
        }

        BUG_ON(rc || setup.status);

        rc = gnttab_interface->map_frames(frames, nr_gframes);

        kfree(frames);

        return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
        .version                        = 1,
        .grefs_per_grant_frame          = XEN_PAGE_SIZE /
                                          sizeof(struct grant_entry_v1),
        .map_frames                     = gnttab_map_frames_v1,
        .unmap_frames                   = gnttab_unmap_frames_v1,
        .update_entry                   = gnttab_update_entry_v1,
        .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v1,
        .read_frame                     = gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
        .version                        = 2,
        .grefs_per_grant_frame          = XEN_PAGE_SIZE /
                                          sizeof(union grant_entry_v2),
        .map_frames                     = gnttab_map_frames_v2,
        .unmap_frames                   = gnttab_unmap_frames_v2,
        .update_entry                   = gnttab_update_entry_v2,
        .end_foreign_access_ref         = gnttab_end_foreign_access_ref_v2,
        .read_frame                     = gnttab_read_frame_v2,
};

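/*
 * Grant table v2 is only needed when frame numbers can exceed the 32-bit
 * frame field of a v1 entry, i.e. when machine addresses may be wider
 * than 32 + PAGE_SHIFT bits.
 */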
static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
        uint32_t base, width;

        if (xen_pv_domain()) {
                base = xen_cpuid_base();
                if (cpuid_eax(base) < 5)
                        return false;   /* Information not available, use V1. */
                width = cpuid_ebx(base + 5) &
                        XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
                return width > 32 + PAGE_SHIFT;
        }
#endif
        return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
        long rc;
        struct gnttab_set_version gsv;

        if (gnttab_need_v2())
                gsv.version = 2;
        else
                gsv.version = 1;

        /* Boot parameter overrides automatic selection. */
        if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
                gsv.version = xen_gnttab_version;

        rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
        if (rc == 0 && gsv.version == 2)
                gnttab_interface = &gnttab_v2_ops;
        else
                gnttab_interface = &gnttab_v1_ops;
        pr_info("Grant tables using version %d layout\n",
                gnttab_interface->version);
}

static int gnttab_setup(void)
{
        unsigned int max_nr_gframes;

        max_nr_gframes = gnttab_max_grant_frames();
        if (max_nr_gframes < nr_grant_frames)
                return -ENOSYS;

        if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
                gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
                if (gnttab_shared.addr == NULL) {
                        pr_warn("gnttab share frames is not mapped!\n");
                        return -ENOMEM;
                }
        }
        return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
        gnttab_request_version();
        return gnttab_setup();
}

int gnttab_suspend(void)
{
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                gnttab_interface->unmap_frames();
        return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
        int rc;
        unsigned int cur, extra;

        cur = nr_grant_frames;
        extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
                 gnttab_interface->grefs_per_grant_frame);
        if (cur + extra > gnttab_max_grant_frames()) {
                pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
                                    " cur=%u extra=%u limit=%u"
                                    " gnttab_free_count=%u req_entries=%u\n",
                                    cur, extra, gnttab_max_grant_frames(),
                                    gnttab_free_count, req_entries);
                return -ENOSPC;
        }

        rc = gnttab_map(cur, cur + extra - 1);
        if (rc == 0)
                rc = grow_gnttab_list(extra);

        return rc;
}

int gnttab_init(void)
{
        int i;
        unsigned long max_nr_grant_frames;
        unsigned int max_nr_glist_frames, nr_glist_frames;
        unsigned int nr_init_grefs;
        int ret;

        gnttab_request_version();
        max_nr_grant_frames = gnttab_max_grant_frames();
        nr_grant_frames = 1;

        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
        max_nr_glist_frames = (max_nr_grant_frames *
                               gnttab_interface->grefs_per_grant_frame / RPP);

        gnttab_list = kmalloc_array(max_nr_glist_frames,
                                    sizeof(grant_ref_t *),
                                    GFP_KERNEL);
        if (gnttab_list == NULL)
                return -ENOMEM;

        nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
        for (i = 0; i < nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
                if (gnttab_list[i] == NULL) {
                        ret = -ENOMEM;
                        goto ini_nomem;
                }
        }

        ret = arch_gnttab_init(max_nr_grant_frames,
                               nr_status_frames(max_nr_grant_frames));
        if (ret < 0)
                goto ini_nomem;

        if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
        }

        nr_init_grefs = nr_grant_frames *
                        gnttab_interface->grefs_per_grant_frame;

        for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
                gnttab_entry(i) = i + 1;

        gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
        gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES;
        gnttab_free_head  = GNTTAB_NR_RESERVED_ENTRIES;

        printk("Grant table initialized\n");
        return 0;

 ini_nomem:
        for (i--; i >= 0; i--)
                free_page((unsigned long)gnttab_list[i]);
        kfree(gnttab_list);
        return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
        if (!xen_domain())
                return -ENODEV;

        /* Delay grant-table initialization in the PV on HVM case */
        if (xen_hvm_domain() && !xen_pvh_domain())
                return 0;

        return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);