8ee13e2e45e2f8a5a29bb91eba7af714027d254d
[linux-2.6-microblaze.git] / drivers / xen / grant-table.c
1 /******************************************************************************
2  * grant_table.c
3  *
4  * Granting foreign access to our memory reservation.
5  *
6  * Copyright (c) 2005-2006, Christopher Clark
7  * Copyright (c) 2004-2005, K A Fraser
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version 2
11  * as published by the Free Software Foundation; or, when distributed
12  * separately from the Linux kernel or incorporated into other
13  * software packages, subject to the following license:
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a copy
16  * of this source file (the "Software"), to deal in the Software without
17  * restriction, including without limitation the rights to use, copy, modify,
18  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19  * and to permit persons to whom the Software is furnished to do so, subject to
20  * the following conditions:
21  *
22  * The above copyright notice and this permission notice shall be included in
23  * all copies or substantial portions of the Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31  * IN THE SOFTWARE.
32  */
33
34 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35
36 #include <linux/module.h>
37 #include <linux/sched.h>
38 #include <linux/mm.h>
39 #include <linux/slab.h>
40 #include <linux/vmalloc.h>
41 #include <linux/uaccess.h>
42 #include <linux/io.h>
43 #include <linux/delay.h>
44 #include <linux/hardirq.h>
45
46 #include <xen/xen.h>
47 #include <xen/interface/xen.h>
48 #include <xen/page.h>
49 #include <xen/grant_table.h>
50 #include <xen/interface/memory.h>
51 #include <xen/hvc-console.h>
52 #include <xen/swiotlb-xen.h>
53 #include <asm/xen/hypercall.h>
54 #include <asm/xen/interface.h>
55
56 #include <asm/pgtable.h>
57 #include <asm/sync_bitops.h>
58
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
/* Sentinel terminating a chain of references in the free list. */
#define GNTTAB_LIST_END 0xffffffff

/* Two-level array of free-list links; one slot per grant reference. */
static grant_ref_t **gnttab_list;
/* Number of shared grant-table frames currently mapped. */
static unsigned int nr_grant_frames;
/* Number of entries currently on the free list. */
static int gnttab_free_count;
/* Head of the free list (GNTTAB_LIST_END when empty). */
static grant_ref_t gnttab_free_head;
/* Protects the free list, callback list and deferred list below. */
static DEFINE_SPINLOCK(gnttab_list_lock);
/* Pre-mapped grant frames for auto-translated guests. */
struct grant_frames xen_auto_xlat_grant_frames;

/*
 * The shared grant table, viewed as v1 or v2 entries depending on the
 * grant table version negotiated with the hypervisor.
 */
static union {
        struct grant_entry_v1 *v1;
        union grant_entry_v2 *v2;
        void *addr;
} gnttab_shared;
75
/* Grant-table-version-specific operations, selected at init time. */
struct gnttab_ops {
        /*
         * Map a list of frames for storing grant entries.  The frames
         * parameter holds the grant table frame addresses when the table is
         * set up; nr_gframes is the number of frames to map.  Returns
         * GNTST_okay on success, a negative value on failure.
         */
        int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
        /*
         * Release the frames that map_frames mapped for grant entry
         * status.
         */
        void (*unmap_frames)(void);
        /*
         * Introduce a valid entry into the grant table, granting the frame
         * of this grant entry to a domain for access or transfer.  ref is
         * the reference of the introduced entry, domid the id of the
         * granted domain, frame the page frame to be granted, and flags the
         * status bits to set on the entry.
         */
        void (*update_entry)(grant_ref_t ref, domid_t domid,
                             unsigned long frame, unsigned flags);
        /*
         * Stop granting a grant entry to a domain for access.  ref is the
         * reference of the grant entry whose access will be revoked;
         * readonly is not used by current implementations.  If the entry is
         * currently mapped for reading or writing, return failure (==0)
         * without tearing down the grant.  Otherwise revoke access and
         * return success (==1).
         */
        int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
        /*
         * Stop granting a grant entry to a domain for transfer.  ref is the
         * reference of the entry whose transfer will be stopped.  If the
         * transfer has not started, just reclaim the grant entry and return
         * failure (==0).  Otherwise, wait for the transfer to complete and
         * then return the frame.
         */
        unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
        /*
         * Query the status of a grant entry.  ref is the reference of the
         * queried entry; the return value carries the entry's status bits
         * (reading/writing), extractable by bit operations.
         */
        int (*query_foreign_access)(grant_ref_t ref);
        /*
         * Grant a domain access to a range of bytes within the page
         * referred to by an available grant entry.  ref is the reference of
         * the entry to be sub-page granted, domid the id of the grantee
         * domain, frame the frame address of the subpage grant, flags the
         * grant type and flag information, page_off the offset of the byte
         * range, and length the number of bytes to be accessed.  NULL when
         * the grant table version does not support sub-page grants.
         */
        void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int flags,
                                     unsigned page_off, unsigned length);
        /*
         * Redirect an available grant entry on domain A to another grant
         * reference of domain B, then allow domain C to use the grant
         * reference of domain B transitively.  ref is an available grant
         * entry reference on domain A, domid the id of domain C which
         * accesses the entry transitively, flags the grant type and flag
         * information, trans_domid the id of domain B whose grant entry is
         * finally accessed transitively, and trans_gref the grant entry
         * reference of domain B.  NULL when transitive grants are not
         * supported by the grant table version.
         */
        void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
                                   domid_t trans_domid, grant_ref_t trans_gref);
};
147
/* Ops for the negotiated grant table version; set during init. */
static struct gnttab_ops *gnttab_interface;

/*
 * Mapped status frames (grant table v2 only): per-entry reading/writing
 * status shared with the hypervisor.
 */
static grant_status_t *grstatus;

static int grant_table_version;
/* Grant entries per shared frame; depends on the table version. */
static int grefs_per_grant_frame;

/* Callbacks waiting for free grant entries to become available. */
static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

/* grant_ref_t slots per page / grant_status_t slots per page. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))
162
/*
 * Map a grant reference onto the address of its free-list link slot in
 * the two-level gnttab_list array (RPP slots per page).
 */
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
        return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
169
/*
 * Pop @count references off the global free list, expanding the grant
 * table by the shortfall if necessary.  Returns the first reference of
 * the detached run (the remaining count-1 refs stay chained behind it
 * via gnttab_entry(), terminated by GNTTAB_LIST_END), or a negative
 * errno if expansion failed.
 */
static int get_free_entries(unsigned count)
{
        unsigned long flags;
        int ref, rc = 0;
        grant_ref_t head;

        spin_lock_irqsave(&gnttab_list_lock, flags);

        /* Not enough free entries: try to grow the table by the deficit. */
        if ((gnttab_free_count < count) &&
            ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                return rc;
        }

        /* Walk @count links from the head and detach that chain. */
        ref = head = gnttab_free_head;
        gnttab_free_count -= count;
        while (count-- > 1)
                head = gnttab_entry(head);
        gnttab_free_head = gnttab_entry(head);
        gnttab_entry(head) = GNTTAB_LIST_END;   /* terminate detached chain */

        spin_unlock_irqrestore(&gnttab_list_lock, flags);

        return ref;
}
195
196 static void do_free_callbacks(void)
197 {
198         struct gnttab_free_callback *callback, *next;
199
200         callback = gnttab_free_callback_list;
201         gnttab_free_callback_list = NULL;
202
203         while (callback != NULL) {
204                 next = callback->next;
205                 if (gnttab_free_count >= callback->count) {
206                         callback->next = NULL;
207                         callback->fn(callback->arg);
208                 } else {
209                         callback->next = gnttab_free_callback_list;
210                         gnttab_free_callback_list = callback;
211                 }
212                 callback = next;
213         }
214 }
215
216 static inline void check_free_callbacks(void)
217 {
218         if (unlikely(gnttab_free_callback_list))
219                 do_free_callbacks();
220 }
221
/*
 * Return a single grant reference to the head of the global free list
 * and wake any callbacks now satisfiable.
 */
static void put_free_entry(grant_ref_t ref)
{
        unsigned long flags;
        spin_lock_irqsave(&gnttab_list_lock, flags);
        gnttab_entry(ref) = gnttab_free_head;
        gnttab_free_head = ref;
        gnttab_free_count++;
        check_free_callbacks();
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
232
233 /*
234  * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
235  * Introducing a valid entry into the grant table:
236  *  1. Write ent->domid.
237  *  2. Write ent->frame:
238  *      GTF_permit_access:   Frame to which access is permitted.
239  *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
240  *                           frame, or zero if none.
241  *  3. Write memory barrier (WMB).
242  *  4. Write ent->flags, inc. valid type.
243  */
244 static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
245                                    unsigned long frame, unsigned flags)
246 {
247         gnttab_shared.v1[ref].domid = domid;
248         gnttab_shared.v1[ref].frame = frame;
249         wmb();
250         gnttab_shared.v1[ref].flags = flags;
251 }
252
253 static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
254                                    unsigned long frame, unsigned flags)
255 {
256         gnttab_shared.v2[ref].hdr.domid = domid;
257         gnttab_shared.v2[ref].full_page.frame = frame;
258         wmb();
259         gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
260 }
261
262 /*
263  * Public grant-issuing interface functions
264  */
265 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
266                                      unsigned long frame, int readonly)
267 {
268         gnttab_interface->update_entry(ref, domid, frame,
269                            GTF_permit_access | (readonly ? GTF_readonly : 0));
270 }
271 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
272
273 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
274                                 int readonly)
275 {
276         int ref;
277
278         ref = get_free_entries(1);
279         if (unlikely(ref < 0))
280                 return -ENOSPC;
281
282         gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
283
284         return ref;
285 }
286 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
287
288 static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
289                                            unsigned long frame, int flags,
290                                            unsigned page_off, unsigned length)
291 {
292         gnttab_shared.v2[ref].sub_page.frame = frame;
293         gnttab_shared.v2[ref].sub_page.page_off = page_off;
294         gnttab_shared.v2[ref].sub_page.length = length;
295         gnttab_shared.v2[ref].hdr.domid = domid;
296         wmb();
297         gnttab_shared.v2[ref].hdr.flags =
298                                 GTF_permit_access | GTF_sub_page | flags;
299 }
300
301 int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
302                                             unsigned long frame, int flags,
303                                             unsigned page_off,
304                                             unsigned length)
305 {
306         if (flags & (GTF_accept_transfer | GTF_reading |
307                      GTF_writing | GTF_transitive))
308                 return -EPERM;
309
310         if (gnttab_interface->update_subpage_entry == NULL)
311                 return -ENOSYS;
312
313         gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
314                                                page_off, length);
315
316         return 0;
317 }
318 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
319
320 int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
321                                         int flags, unsigned page_off,
322                                         unsigned length)
323 {
324         int ref, rc;
325
326         ref = get_free_entries(1);
327         if (unlikely(ref < 0))
328                 return -ENOSPC;
329
330         rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
331                                                      page_off, length);
332         if (rc < 0) {
333                 put_free_entry(ref);
334                 return rc;
335         }
336
337         return ref;
338 }
339 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
340
341 bool gnttab_subpage_grants_available(void)
342 {
343         return gnttab_interface->update_subpage_entry != NULL;
344 }
345 EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
346
347 static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
348                                          int flags, domid_t trans_domid,
349                                          grant_ref_t trans_gref)
350 {
351         gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
352         gnttab_shared.v2[ref].transitive.gref = trans_gref;
353         gnttab_shared.v2[ref].hdr.domid = domid;
354         wmb();
355         gnttab_shared.v2[ref].hdr.flags =
356                                 GTF_permit_access | GTF_transitive | flags;
357 }
358
359 int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
360                                           int flags, domid_t trans_domid,
361                                           grant_ref_t trans_gref)
362 {
363         if (flags & (GTF_accept_transfer | GTF_reading |
364                      GTF_writing | GTF_sub_page))
365                 return -EPERM;
366
367         if (gnttab_interface->update_trans_entry == NULL)
368                 return -ENOSYS;
369
370         gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
371                                              trans_gref);
372
373         return 0;
374 }
375 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
376
377 int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
378                                       domid_t trans_domid,
379                                       grant_ref_t trans_gref)
380 {
381         int ref, rc;
382
383         ref = get_free_entries(1);
384         if (unlikely(ref < 0))
385                 return -ENOSPC;
386
387         rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
388                                                    trans_domid, trans_gref);
389         if (rc < 0) {
390                 put_free_entry(ref);
391                 return rc;
392         }
393
394         return ref;
395 }
396 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
397
398 bool gnttab_trans_grants_available(void)
399 {
400         return gnttab_interface->update_trans_entry != NULL;
401 }
402 EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
403
404 static int gnttab_query_foreign_access_v1(grant_ref_t ref)
405 {
406         return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
407 }
408
409 static int gnttab_query_foreign_access_v2(grant_ref_t ref)
410 {
411         return grstatus[ref] & (GTF_reading|GTF_writing);
412 }
413
/*
 * Report whether grant entry @ref is currently mapped by the grantee
 * (non-zero means the reading and/or writing status bit is set).
 */
int gnttab_query_foreign_access(grant_ref_t ref)
{
        return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
419
/*
 * v1: atomically clear the entry's flags, but only while the grantee
 * does not have it mapped (GTF_reading/GTF_writing clear).  Returns 1
 * on success, 0 if the entry is still in use.  @readonly is unused.
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
        u16 flags, nflags;
        u16 *pflags;

        pflags = &gnttab_shared.v1[ref].flags;
        nflags = *pflags;
        do {
                flags = nflags;
                /* Grantee still has it mapped: refuse to revoke. */
                if (flags & (GTF_reading|GTF_writing))
                        return 0;
                /* Retry if the flags changed under us before the swap. */
        } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

        return 1;
}
435
/*
 * v2: clear the entry's flags, then check the separate status word to
 * see whether the grantee still has the page mapped.  Returns 1 on
 * success, 0 if the entry is still in use.  @readonly is unused.
 */
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
        gnttab_shared.v2[ref].hdr.flags = 0;
        mb();   /* flags clear must be visible before reading grstatus */
        if (grstatus[ref] & (GTF_reading|GTF_writing)) {
                return 0;
        } else {
                /*
                 * The read of grstatus needs to have acquire semantics.
                 * On x86, reads already have that, and we just need to
                 * protect against compiler reorderings.  On other
                 * architectures we may need a full barrier.
                 */
#ifdef CONFIG_X86
                barrier();
#else
                mb();
#endif
        }

        return 1;
}
458
/* Version dispatch for revoking access; no warning on failure. */
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
        return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
463
464 int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
465 {
466         if (_gnttab_end_foreign_access_ref(ref, readonly))
467                 return 1;
468         pr_warn("WARNING: g.e. %#x still in use!\n", ref);
469         return 0;
470 }
471 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
472
/*
 * A grant whose revocation failed because the peer still had it
 * mapped; retried periodically from deferred_timer.
 */
struct deferred_entry {
        struct list_head list;
        grant_ref_t ref;        /* reference to free once revoked */
        bool ro;                /* readonly flag passed through to revoke */
        uint16_t warn_delay;    /* retries until a "still pending" message */
        struct page *page;      /* backing page to free, may be NULL */
};
static LIST_HEAD(deferred_list);
/* Forward declaration: the timer is defined before the handler body. */
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
483
/*
 * Timer callback: retry revoking up to 10 deferred grant entries per
 * tick.  Entries that succeed are freed; entries still mapped by the
 * peer are re-queued at the tail.  The timer is re-armed while work
 * remains.
 */
static void gnttab_handle_deferred(unsigned long unused)
{
        unsigned int nr = 10;   /* bound the work done per timer tick */
        struct deferred_entry *first = NULL;
        unsigned long flags;

        spin_lock_irqsave(&gnttab_list_lock, flags);
        while (nr--) {
                struct deferred_entry *entry
                        = list_first_entry(&deferred_list,
                                           struct deferred_entry, list);

                /* Wrapped around to the first re-queued entry: stop. */
                if (entry == first)
                        break;
                list_del(&entry->list);
                /*
                 * Drop the lock: the revoke touches the shared table and
                 * put_free_entry() re-takes gnttab_list_lock itself.
                 */
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
                        put_free_entry(entry->ref);
                        if (entry->page) {
                                pr_debug("freeing g.e. %#x (pfn %#lx)\n",
                                         entry->ref, page_to_pfn(entry->page));
                                __free_page(entry->page);
                        } else
                                pr_info("freeing g.e. %#x\n", entry->ref);
                        kfree(entry);
                        entry = NULL;   /* fully released: don't re-queue */
                } else {
                        if (!--entry->warn_delay)
                                pr_info("g.e. %#x still pending\n", entry->ref);
                        if (!first)
                                first = entry;  /* remember wrap-around marker */
                }
                spin_lock_irqsave(&gnttab_list_lock, flags);
                if (entry)
                        list_add_tail(&entry->list, &deferred_list);
                else if (list_empty(&deferred_list))
                        break;
        }
        /* Re-arm only if entries remain and no timer is already queued. */
        if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
                deferred_timer.expires = jiffies + HZ;
                add_timer(&deferred_timer);
        }
        spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
528
/*
 * Queue grant @ref (and optionally its backing @page) for deferred
 * revocation; on allocation failure the grant and page are simply
 * leaked (logged).  NOTE(review): the KERN_* prefix is smuggled into
 * the message through the %s argument rather than the format string —
 * presumably relying on printk extracting the level from the formatted
 * text; confirm this matches the printk behaviour of this kernel
 * version.
 */
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
                                struct page *page)
{
        /* GFP_ATOMIC: may be called from contexts that cannot sleep. */
        struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        const char *what = KERN_WARNING "leaking";

        if (entry) {
                unsigned long flags;

                entry->ref = ref;
                entry->ro = readonly;
                entry->page = page;
                entry->warn_delay = 60; /* ~60 retries before complaining */
                spin_lock_irqsave(&gnttab_list_lock, flags);
                list_add_tail(&entry->list, &deferred_list);
                if (!timer_pending(&deferred_timer)) {
                        deferred_timer.expires = jiffies + HZ;
                        add_timer(&deferred_timer);
                }
                spin_unlock_irqrestore(&gnttab_list_lock, flags);
                what = KERN_DEBUG "deferring";
        }
        printk("%s g.e. %#x (pfn %#lx)\n",
               what, ref, page ? page_to_pfn(page) : -1);
}
554
555 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
556                                unsigned long page)
557 {
558         if (gnttab_end_foreign_access_ref(ref, readonly)) {
559                 put_free_entry(ref);
560                 if (page != 0)
561                         free_page(page);
562         } else
563                 gnttab_add_deferred(ref, readonly,
564                                     page ? virt_to_page(page) : NULL);
565 }
566 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
567
568 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
569 {
570         int ref;
571
572         ref = get_free_entries(1);
573         if (unlikely(ref < 0))
574                 return -ENOSPC;
575         gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
576
577         return ref;
578 }
579 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
580
/* Fill in an already-allocated entry @ref as a transfer grant of @pfn
 * to @domid. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
                                       unsigned long pfn)
{
        gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
587
/*
 * v1: finish (or reclaim) a transfer grant.  Returns 0 if the transfer
 * never started (entry reclaimed), otherwise busy-waits for completion
 * and returns the transferred frame number.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
        unsigned long frame;
        u16           flags;
        u16          *pflags;

        pflags = &gnttab_shared.v1[ref].flags;

        /*
         * If a transfer is not even yet started, try to reclaim the grant
         * reference and return failure (== 0).
         */
        while (!((flags = *pflags) & GTF_transfer_committed)) {
                if (sync_cmpxchg(pflags, flags, 0) == flags)
                        return 0;
                cpu_relax();
        }

        /* If a transfer is in progress then wait until it is completed. */
        while (!(flags & GTF_transfer_completed)) {
                flags = *pflags;
                cpu_relax();
        }

        rmb();  /* Read the frame number /after/ reading completion status. */
        frame = gnttab_shared.v1[ref].frame;
        BUG_ON(frame == 0);     /* a completed transfer must carry a frame */

        return frame;
}
618
/*
 * v2: finish (or reclaim) a transfer grant.  Same protocol as the v1
 * variant, but the flags and frame live in the v2 entry layout.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
        unsigned long frame;
        u16           flags;
        u16          *pflags;

        pflags = &gnttab_shared.v2[ref].hdr.flags;

        /*
         * If a transfer is not even yet started, try to reclaim the grant
         * reference and return failure (== 0).
         */
        while (!((flags = *pflags) & GTF_transfer_committed)) {
                if (sync_cmpxchg(pflags, flags, 0) == flags)
                        return 0;
                cpu_relax();
        }

        /* If a transfer is in progress then wait until it is completed. */
        while (!(flags & GTF_transfer_completed)) {
                flags = *pflags;
                cpu_relax();
        }

        rmb();  /* Read the frame number /after/ reading completion status. */
        frame = gnttab_shared.v2[ref].full_page.frame;
        BUG_ON(frame == 0);     /* a completed transfer must carry a frame */

        return frame;
}
649
/* Version dispatch: complete/reclaim a transfer without freeing @ref. */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
        return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
655
656 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
657 {
658         unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
659         put_free_entry(ref);
660         return frame;
661 }
662 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
663
/* Return a single no-longer-used grant reference to the free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
        put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
669
670 void gnttab_free_grant_references(grant_ref_t head)
671 {
672         grant_ref_t ref;
673         unsigned long flags;
674         int count = 1;
675         if (head == GNTTAB_LIST_END)
676                 return;
677         spin_lock_irqsave(&gnttab_list_lock, flags);
678         ref = head;
679         while (gnttab_entry(ref) != GNTTAB_LIST_END) {
680                 ref = gnttab_entry(ref);
681                 count++;
682         }
683         gnttab_entry(ref) = gnttab_free_head;
684         gnttab_free_head = head;
685         gnttab_free_count += count;
686         check_free_callbacks();
687         spin_unlock_irqrestore(&gnttab_list_lock, flags);
688 }
689 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
690
691 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
692 {
693         int h = get_free_entries(count);
694
695         if (h < 0)
696                 return -ENOSPC;
697
698         *head = h;
699
700         return 0;
701 }
702 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
703
704 int gnttab_empty_grant_references(const grant_ref_t *private_head)
705 {
706         return (*private_head == GNTTAB_LIST_END);
707 }
708 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
709
710 int gnttab_claim_grant_reference(grant_ref_t *private_head)
711 {
712         grant_ref_t g = *private_head;
713         if (unlikely(g == GNTTAB_LIST_END))
714                 return -ENOSPC;
715         *private_head = gnttab_entry(g);
716         return g;
717 }
718 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
719
/* Push @release back onto a caller-private chain headed by
 * *private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release)
{
        gnttab_entry(release) = *private_head;
        *private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
727
728 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
729                                   void (*fn)(void *), void *arg, u16 count)
730 {
731         unsigned long flags;
732         struct gnttab_free_callback *cb;
733
734         spin_lock_irqsave(&gnttab_list_lock, flags);
735
736         /* Check if the callback is already on the list */
737         cb = gnttab_free_callback_list;
738         while (cb) {
739                 if (cb == callback)
740                         goto out;
741                 cb = cb->next;
742         }
743
744         callback->fn = fn;
745         callback->arg = arg;
746         callback->count = count;
747         callback->next = gnttab_free_callback_list;
748         gnttab_free_callback_list = callback;
749         check_free_callbacks();
750 out:
751         spin_unlock_irqrestore(&gnttab_list_lock, flags);
752 }
753 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
754
755 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
756 {
757         struct gnttab_free_callback **pcb;
758         unsigned long flags;
759
760         spin_lock_irqsave(&gnttab_list_lock, flags);
761         for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
762                 if (*pcb == callback) {
763                         *pcb = callback->next;
764                         break;
765                 }
766         }
767         spin_unlock_irqrestore(&gnttab_list_lock, flags);
768 }
769 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
770
/*
 * Grow the free-list bookkeeping by @more_frames grant frames' worth of
 * entries, chaining the new references onto the free list.  Called with
 * gnttab_list_lock held (uses GFP_ATOMIC).  Returns 0 or -ENOMEM.
 */
static int grow_gnttab_list(unsigned int more_frames)
{
        unsigned int new_nr_grant_frames, extra_entries, i;
        unsigned int nr_glist_frames, new_nr_glist_frames;

        BUG_ON(grefs_per_grant_frame == 0);

        new_nr_grant_frames = nr_grant_frames + more_frames;
        extra_entries       = more_frames * grefs_per_grant_frame;

        /* Number of gnttab_list pages needed before and after the grow. */
        nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        new_nr_glist_frames =
                (new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
        for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
                gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
                if (!gnttab_list[i])
                        goto grow_nomem;
        }


        /* Chain the new references together and splice them in as the
         * new head of the free list. */
        for (i = grefs_per_grant_frame * nr_grant_frames;
             i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
                gnttab_entry(i) = i + 1;

        gnttab_entry(i) = gnttab_free_head;
        gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
        gnttab_free_count += extra_entries;

        nr_grant_frames = new_nr_grant_frames;

        check_free_callbacks();

        return 0;

grow_nomem:
        /* Unwind pages allocated this call (the page at the failing index
         * is NULL; free_page(0) is a no-op).  NOTE(review): i is unsigned,
         * so this loop relies on nr_glist_frames being non-zero to
         * terminate — presumably always true once the table is
         * initialised; confirm against the init path. */
        for ( ; i >= nr_glist_frames; i--)
                free_page((unsigned long) gnttab_list[i]);
        return -ENOMEM;
}
810
811 static unsigned int __max_nr_grant_frames(void)
812 {
813         struct gnttab_query_size query;
814         int rc;
815
816         query.dom = DOMID_SELF;
817
818         rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
819         if ((rc < 0) || (query.status != GNTST_okay))
820                 return 4; /* Legacy max supported number of frames */
821
822         return query.max_nr_frames;
823 }
824
/*
 * Maximum number of grant frames we will use: the hypervisor's current
 * limit, clamped to the limit observed the first time this ran (so a
 * raised limit at runtime does not change our sizing).
 *
 * Fix: the original called __max_nr_grant_frames() a second time just
 * to initialise boot_max_nr_grant_frames, issuing a redundant
 * GNTTABOP_query_size hypercall on the first invocation; reuse the
 * value already queried into xen_max instead.
 */
unsigned int gnttab_max_grant_frames(void)
{
        unsigned int xen_max = __max_nr_grant_frames();
        static unsigned int boot_max_nr_grant_frames;

        /* First time, latch the limit seen at boot. */
        if (!boot_max_nr_grant_frames)
                boot_max_nr_grant_frames = xen_max;

        return min(xen_max, boot_max_nr_grant_frames);
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
839
840 int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
841 {
842         xen_pfn_t *pfn;
843         unsigned int max_nr_gframes = __max_nr_grant_frames();
844         unsigned int i;
845         void *vaddr;
846
847         if (xen_auto_xlat_grant_frames.count)
848                 return -EINVAL;
849
850         vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
851         if (vaddr == NULL) {
852                 pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
853                         &addr);
854                 return -ENOMEM;
855         }
856         pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
857         if (!pfn) {
858                 xen_unmap(vaddr);
859                 return -ENOMEM;
860         }
861         for (i = 0; i < max_nr_gframes; i++)
862                 pfn[i] = PFN_DOWN(addr) + i;
863
864         xen_auto_xlat_grant_frames.vaddr = vaddr;
865         xen_auto_xlat_grant_frames.pfn = pfn;
866         xen_auto_xlat_grant_frames.count = max_nr_gframes;
867
868         return 0;
869 }
870 EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
871
872 void gnttab_free_auto_xlat_frames(void)
873 {
874         if (!xen_auto_xlat_grant_frames.count)
875                 return;
876         kfree(xen_auto_xlat_grant_frames.pfn);
877         xen_unmap(xen_auto_xlat_grant_frames.vaddr);
878
879         xen_auto_xlat_grant_frames.pfn = NULL;
880         xen_auto_xlat_grant_frames.count = 0;
881         xen_auto_xlat_grant_frames.vaddr = NULL;
882 }
883 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
884
885 /* Handling of paged out grant targets (GNTST_eagain) */
886 #define MAX_DELAY 256
887 static inline void
888 gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
889                                                 const char *func)
890 {
891         unsigned delay = 1;
892
893         do {
894                 BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
895                 if (*status == GNTST_eagain)
896                         msleep(delay++);
897         } while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
898
899         if (delay >= MAX_DELAY) {
900                 pr_err("%s: %s eagain grant\n", func, current->comm);
901                 *status = GNTST_bad_page;
902         }
903 }
904
905 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
906 {
907         struct gnttab_map_grant_ref *op;
908
909         if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
910                 BUG();
911         for (op = batch; op < batch + count; op++)
912                 if (op->status == GNTST_eagain)
913                         gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
914                                                 &op->status, __func__);
915 }
916 EXPORT_SYMBOL_GPL(gnttab_batch_map);
917
918 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
919 {
920         struct gnttab_copy *op;
921
922         if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
923                 BUG();
924         for (op = batch; op < batch + count; op++)
925                 if (op->status == GNTST_eagain)
926                         gnttab_retry_eagain_gop(GNTTABOP_copy, op,
927                                                 &op->status, __func__);
928 }
929 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
930
/*
 * Map a batch of granted pages and update the p2m/m2p bookkeeping.
 *
 * @map_ops:      ops describing the grants to map; per-entry status is
 *                updated in place by the hypercall/retry path.
 * @kmap_ops:     optional kernel-mapping ops; must be NULL unless
 *                m2p_override is true (enforced by BUG_ON below).
 * @pages:        local pages backing each mapping.
 * @count:        number of entries in map_ops/pages.
 * @m2p_override: register each mapping in the m2p override table
 *                (used by the _userspace wrappers).
 *
 * Returns 0 on success or a negative errno.  Entries whose individual
 * map failed are skipped; their error remains in map_ops[i].status.
 */
int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count,
		    bool m2p_override)
{
	int i, ret;
	bool lazy = false;
	pte_t *pte;
	unsigned long mfn, pfn;

	BUG_ON(kmap_ops && !m2p_override);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Retry eagain maps */
	for (i = 0; i < count; i++)
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

	/* this is basically a nop on x86 */
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		/* Auto-translated guests only record the new p2m entries. */
		for (i = 0; i < count; i++) {
			if (map_ops[i].status)
				continue;
			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
		}
		return 0;
	}

	/* Batch the PTE updates via lazy MMU mode when it is safe to do so. */
	if (m2p_override &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		/* Work out the foreign MFN that was just mapped here. */
		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		/* Stash the foreign MFN in page private for the unmap path. */
		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);

		/*
		 * Save the original p2m entry in page->index so
		 * __gnttab_unmap_refs() can restore it later.
		 */
		pages[i]->index = pfn_to_mfn(pfn);
		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}
		if (m2p_override)
			ret = m2p_add_override(mfn, pages[i], kmap_ops ?
					       &kmap_ops[i] : NULL);
		if (ret)
			goto out;
	}

 out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
1006
/*
 * Map granted pages for kernel-only use: no kmap ops and no m2p
 * override.  Thin wrapper around __gnttab_map_refs().
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	return __gnttab_map_refs(map_ops, NULL, pages, count, false);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
1013
/*
 * Map granted pages that will also be visible to userspace: passes the
 * optional kmap ops through and enables the m2p override.
 */
int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
1021
/*
 * Unmap a batch of granted pages and undo the p2m/m2p bookkeeping done
 * by __gnttab_map_refs().
 *
 * @unmap_ops:    ops describing the mappings to tear down.
 * @kmap_ops:     optional kernel-mapping ops; must be NULL unless
 *                m2p_override is true (enforced by BUG_ON below).
 * @pages:        local pages backing each mapping.
 * @count:        number of entries in unmap_ops/pages.
 * @m2p_override: also remove each mapping from the m2p override table.
 *
 * Returns 0 on success or a negative errno (-EINVAL if a page's p2m
 * entry does not look like a foreign mapping).
 */
int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count,
		      bool m2p_override)
{
	int i, ret;
	bool lazy = false;
	unsigned long pfn, mfn;

	BUG_ON(kmap_ops && !m2p_override);
	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	/* this is basically a nop on x86 */
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		/* Auto-translated guests only invalidate the p2m entries. */
		for (i = 0; i < count; i++) {
			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
					INVALID_P2M_ENTRY);
		}
		return 0;
	}

	/* Batch the PTE updates via lazy MMU mode when it is safe to do so. */
	if (m2p_override &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		pfn = page_to_pfn(pages[i]);
		mfn = get_phys_to_machine(pfn);
		/* The page must currently carry a foreign mapping. */
		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		/* Drop the MFN stashed in page private by the map path. */
		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		/* Restore the original p2m entry saved in page->index. */
		set_phys_to_machine(pfn, pages[i]->index);
		if (m2p_override)
			ret = m2p_remove_override(pages[i],
						  kmap_ops ?
						   &kmap_ops[i] : NULL,
						  mfn);
		if (ret)
			goto out;
	}

 out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
1079
/*
 * Unmap granted pages mapped via gnttab_map_refs(): no kmap ops and no
 * m2p override.  Thin wrapper around __gnttab_unmap_refs().
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1086
/*
 * Unmap granted pages mapped via gnttab_map_refs_userspace(): passes the
 * optional kmap ops through and removes the m2p override entries.
 */
int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
				struct gnttab_map_grant_ref *kmap_ops,
				struct page **pages, unsigned int count)
{
	return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
1094
1095 static unsigned nr_status_frames(unsigned nr_grant_frames)
1096 {
1097         BUG_ON(grefs_per_grant_frame == 0);
1098         return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
1099 }
1100
1101 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1102 {
1103         int rc;
1104
1105         rc = arch_gnttab_map_shared(frames, nr_gframes,
1106                                     gnttab_max_grant_frames(),
1107                                     &gnttab_shared.addr);
1108         BUG_ON(rc);
1109
1110         return 0;
1111 }
1112
/* Tear down the v1 mapping of the shared grant frames. */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
1117
/*
 * Map the grant frames for the v2 layout: query the hypervisor for the
 * status frames, map them, then map the shared frames.  Returns 0 on
 * success, -ENOMEM on allocation failure, -ENOSYS if the hypervisor does
 * not implement GNTTABOP_get_status_frames.
 */
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc(nr_sframes  * sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	/* Any other failure here is unrecoverable. */
	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
1160
/* Tear down the v2 mappings: the shared frames and the status frames. */
static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
1166
/*
 * Ask the hypervisor to populate grant-table frames start_idx..end_idx
 * (inclusive) and map them into the kernel via the active interface.
 *
 * For auto-translated guests the frames are added to the physmap
 * directly; otherwise GNTTABOP_setup_table is used.  Returns 0 on
 * success or a negative errno.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	/* Any other failure here is unrecoverable. */
	BUG_ON(rc || setup.status);

	/* Map all frames (not just the new ones) via the v1/v2 interface. */
	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
1224
/* Operations for the version 1 grant-table layout (struct grant_entry_v1). */
static struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};
1233
/*
 * Operations for the version 2 grant-table layout (union grant_entry_v2);
 * additionally supports sub-page and transitive grants.
 */
static struct gnttab_ops gnttab_v2_ops = {
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
	.update_subpage_entry		= gnttab_update_subpage_entry_v2,
	.update_trans_entry		= gnttab_update_trans_entry_v2,
};
1244
1245 static void gnttab_request_version(void)
1246 {
1247         int rc;
1248         struct gnttab_set_version gsv;
1249
1250         gsv.version = 1;
1251
1252         rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1253         if (rc == 0 && gsv.version == 2) {
1254                 grant_table_version = 2;
1255                 grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
1256                 gnttab_interface = &gnttab_v2_ops;
1257         } else if (grant_table_version == 2) {
1258                 /*
1259                  * If we've already used version 2 features,
1260                  * but then suddenly discover that they're not
1261                  * available (e.g. migrating to an older
1262                  * version of Xen), almost unbounded badness
1263                  * can happen.
1264                  */
1265                 panic("we need grant tables version 2, but only version 1 is available");
1266         } else {
1267                 grant_table_version = 1;
1268                 grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
1269                 gnttab_interface = &gnttab_v1_ops;
1270         }
1271         pr_info("Grant tables using version %d layout\n", grant_table_version);
1272 }
1273
1274 static int gnttab_setup(void)
1275 {
1276         unsigned int max_nr_gframes;
1277
1278         max_nr_gframes = gnttab_max_grant_frames();
1279         if (max_nr_gframes < nr_grant_frames)
1280                 return -ENOSYS;
1281
1282         if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1283                 gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1284                 if (gnttab_shared.addr == NULL) {
1285                         pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
1286                                 (unsigned long)xen_auto_xlat_grant_frames.vaddr);
1287                         return -ENOMEM;
1288                 }
1289         }
1290         return gnttab_map(0, nr_grant_frames - 1);
1291 }
1292
/* Re-negotiate the grant-table version and remap the frames (resume path). */
int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}
1298
/* Unmap the grant-table frames ahead of suspend; always succeeds. */
int gnttab_suspend(void)
{
	gnttab_interface->unmap_frames();
	return 0;
}
1304
1305 static int gnttab_expand(unsigned int req_entries)
1306 {
1307         int rc;
1308         unsigned int cur, extra;
1309
1310         BUG_ON(grefs_per_grant_frame == 0);
1311         cur = nr_grant_frames;
1312         extra = ((req_entries + (grefs_per_grant_frame-1)) /
1313                  grefs_per_grant_frame);
1314         if (cur + extra > gnttab_max_grant_frames())
1315                 return -ENOSPC;
1316
1317         rc = gnttab_map(cur, cur + extra - 1);
1318         if (rc == 0)
1319                 rc = grow_gnttab_list(extra);
1320
1321         return rc;
1322 }
1323
1324 int gnttab_init(void)
1325 {
1326         int i;
1327         unsigned int max_nr_glist_frames, nr_glist_frames;
1328         unsigned int nr_init_grefs;
1329         int ret;
1330
1331         gnttab_request_version();
1332         nr_grant_frames = 1;
1333
1334         /* Determine the maximum number of frames required for the
1335          * grant reference free list on the current hypervisor.
1336          */
1337         BUG_ON(grefs_per_grant_frame == 0);
1338         max_nr_glist_frames = (gnttab_max_grant_frames() *
1339                                grefs_per_grant_frame / RPP);
1340
1341         gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
1342                               GFP_KERNEL);
1343         if (gnttab_list == NULL)
1344                 return -ENOMEM;
1345
1346         nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
1347         for (i = 0; i < nr_glist_frames; i++) {
1348                 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1349                 if (gnttab_list[i] == NULL) {
1350                         ret = -ENOMEM;
1351                         goto ini_nomem;
1352                 }
1353         }
1354
1355         if (gnttab_setup() < 0) {
1356                 ret = -ENODEV;
1357                 goto ini_nomem;
1358         }
1359
1360         nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
1361
1362         for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1363                 gnttab_entry(i) = i + 1;
1364
1365         gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1366         gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1367         gnttab_free_head  = NR_RESERVED_ENTRIES;
1368
1369         printk("Grant table initialized\n");
1370         return 0;
1371
1372  ini_nomem:
1373         for (i--; i >= 0; i--)
1374                 free_page((unsigned long)gnttab_list[i]);
1375         kfree(gnttab_list);
1376         return ret;
1377 }
1378 EXPORT_SYMBOL_GPL(gnttab_init);
1379
1380 static int __gnttab_init(void)
1381 {
1382         /* Delay grant-table initialization in the PV on HVM case */
1383         if (xen_hvm_domain())
1384                 return 0;
1385
1386         if (!xen_pv_domain())
1387                 return -ENODEV;
1388
1389         return gnttab_init();
1390 }
1391 /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1392  * beforehand to initialize xen_auto_xlat_grant_frames. */
1393 core_initcall_sync(__gnttab_init);