drivers/xen/xen-front-pgdir-shbuf.c
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2
3 /*
4  * Xen frontend/backend page directory based shared buffer
5  * helper module.
6  *
7  * Copyright (C) 2018 EPAM Systems Inc.
8  *
9  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
10  */
11
12 #include <linux/module.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15
16 #include <asm/xen/hypervisor.h>
17 #include <xen/balloon.h>
18 #include <xen/xen.h>
19 #include <xen/xenbus.h>
20 #include <xen/interface/io/ring.h>
21
22 #include <xen/xen-front-pgdir-shbuf.h>
23
24 #ifndef GRANT_INVALID_REF
25 /*
26  * FIXME: usage of grant reference 0 as invalid grant reference:
27  * grant reference 0 is valid, but never exposed to a PV driver,
28  * because it is already in use/reserved by the PV console.
29  */
30 #define GRANT_INVALID_REF       0
31 #endif
32
33 /**
34  * Describes the layout of a shared page that contains grant
35  * references to the pages of the shared buffer. This layout is
36  * common to many Xen para-virtualized protocols found under
37  * include/xen/interface/io/.
38  */
39 struct xen_page_directory {
40         grant_ref_t gref_dir_next_page;
41         grant_ref_t gref[1]; /* Variable length */
42 };
43
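/*
 * For example (an illustrative sketch of the layout built by the
 * fill_page_dir helpers below), a buffer whose grefs span two directory
 * pages is described as a short linked list:
 *
 *   directory page 0: gref_dir_next_page = gref of directory page 1,
 *                     gref[] = grefs of the first buffer pages
 *   directory page 1: gref_dir_next_page = GRANT_INVALID_REF,
 *                     gref[] = grefs of the remaining buffer pages
 *
 * In the backend-allocated case the gref[] slots are filled in by the
 * backend rather than by this module.
 */
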
44 /**
45  * Shared buffer ops which are implemented differently
46  * depending on the allocation mode, e.g. whether the buffer
47  * is allocated by the backend or by the frontend itself.
48  * Some of the operations are optional and left unset for a given mode.
49  */
50 struct xen_front_pgdir_shbuf_ops {
51         /*
52          * Calculate number of grefs required to handle this buffer,
53          * e.g. if grefs are required for page directory only or the buffer
54          * pages as well.
55          */
56         void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
57
58         /* Fill page directory according to para-virtual display protocol. */
59         void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
60
61         /* Claim grant references for the pages of the buffer. */
62         int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
63                                      grant_ref_t *priv_gref_head, int gref_idx);
64
65         /* Map grant references of the buffer. */
66         int (*map)(struct xen_front_pgdir_shbuf *buf);
67
68         /* Unmap grant references of the buffer. */
69         int (*unmap)(struct xen_front_pgdir_shbuf *buf);
70 };
71
72 /**
73  * Get the granted reference to the very first page of the
74  * page directory. Usually this is passed to the backend,
75  * so it can find/fill the grant references to the buffer's
76  * pages.
77  *
78  * \param buf shared buffer whose page directory is of interest.
79  * \return granted reference to the very first page of the
80  * page directory.
81  */
82 grant_ref_t
83 xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
84 {
85         if (!buf->grefs)
86                 return GRANT_INVALID_REF;
87
88         return buf->grefs[0];
89 }
90 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
91
92 /**
93  * Map granted references of the shared buffer.
94  *
95  * Depending on the shared buffer mode of allocation
96  * (be_alloc flag) this can either do nothing (for buffers
97  * shared by the frontend itself) or map the provided granted
98  * references onto the backing storage (buf->pages).
99  *
100  * \param buf shared buffer whose grant references are to be mapped.
101  * \return zero on success or a negative number on failure.
102  */
103 int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
104 {
105         if (buf->ops && buf->ops->map)
106                 return buf->ops->map(buf);
107
108         /* No need to map own grant references. */
109         return 0;
110 }
111 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
112
113 /**
114  * Unmap granted references of the shared buffer.
115  *
116  * Depending on the shared buffer mode of allocation
117  * (be_alloc flag) this can either do nothing (for buffers
118  * shared by the frontend itself) or unmap the provided granted
119  * references.
120  *
121  * \param buf shared buffer whose grant references are to be unmapped.
122  * \return zero on success or a negative number on failure.
123  */
124 int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
125 {
126         if (buf->ops && buf->ops->unmap)
127                 return buf->ops->unmap(buf);
128
129         /* No need to unmap own grant references. */
130         return 0;
131 }
132 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
133
134 /**
135  * Free all the resources of the shared buffer.
136  *
137  * \param buf shared buffer whose resources are to be freed.
138  */
139 void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
140 {
141         if (buf->grefs) {
142                 int i;
143
144                 for (i = 0; i < buf->num_grefs; i++)
145                         if (buf->grefs[i] != GRANT_INVALID_REF)
146                                 gnttab_end_foreign_access(buf->grefs[i],
147                                                           0, 0UL);
148         }
149         kfree(buf->grefs);
150         kfree(buf->directory);
151 }
152 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
153
154 /*
155  * Number of grefs a page can hold with respect to the
156  * struct xen_page_directory header.
157  */
158 #define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
159                                  offsetof(struct xen_page_directory, \
160                                           gref)) / sizeof(grant_ref_t))
161
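/*
 * For example, assuming 4 KiB pages and 32-bit grant references
 * (grant_ref_t), offsetof(struct xen_page_directory, gref) is 4, so
 * XEN_NUM_GREFS_PER_PAGE = (4096 - 4) / 4 = 1023: one directory page
 * can reference up to 1023 buffer pages (roughly 4 MiB of data).
 */
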
162 /**
163  * Get the number of pages the page directory itself occupies.
164  *
165  * \param buf shared buffer.
166  */
167 static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
168 {
169         return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
170 }
171
172 /**
173  * Calculate the number of grant references needed to share the buffer
174  * and its pages when the backend allocates the buffer.
175  *
176  * \param buf shared buffer.
177  */
178 static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
179 {
180         /* Grefs are needed only for the pages of the page directory itself. */
181         buf->num_grefs = get_num_pages_dir(buf);
182 }
183
184 /**
185  * Calculate the number of grant references needed to share the buffer
186  * and its pages when the frontend allocates the buffer.
187  *
188  * \param buf shared buffer.
189  */
190 static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
191 {
192         /*
193          * One gref per page of the page directory itself
194          * plus one gref per buffer page.
195          */
196         buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
197 }
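/*
 * Worked example (assuming 4 KiB pages): sharing a 16-page buffer needs
 * get_num_pages_dir() = DIV_ROUND_UP(16, 1023) = 1 directory page, so
 * num_grefs is 1 when the backend allocates the buffer and 1 + 16 = 17
 * when the frontend allocates it.
 */
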
198
199 #define xen_page_to_vaddr(page) \
200         ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
201
202 /**
203  * Unmap the buffer previously mapped with grant references
204  * provided by the backend.
205  *
206  * \param buf shared buffer.
207  * \return zero on success or a negative number on failure.
208  */
209 static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
210 {
211         struct gnttab_unmap_grant_ref *unmap_ops;
212         int i, ret;
213
214         if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
215                 return 0;
216
217         unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
218                             GFP_KERNEL);
219         if (!unmap_ops)
220                 return -ENOMEM;
221
222         for (i = 0; i < buf->num_pages; i++) {
223                 phys_addr_t addr;
224
225                 addr = xen_page_to_vaddr(buf->pages[i]);
226                 gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
227                                     buf->backend_map_handles[i]);
228         }
229
230         ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
231                                 buf->num_pages);
232
233         for (i = 0; i < buf->num_pages; i++) {
234                 if (unlikely(unmap_ops[i].status != GNTST_okay))
235                         dev_err(&buf->xb_dev->dev,
236                                 "Failed to unmap page %d: %d\n",
237                                 i, unmap_ops[i].status);
238         }
239
240         if (ret)
241                 dev_err(&buf->xb_dev->dev,
242                         "Failed to unmap grant references, ret %d", ret);
243
244         kfree(unmap_ops);
245         kfree(buf->backend_map_handles);
246         buf->backend_map_handles = NULL;
247         return ret;
248 }
249
250 /**
251  * Map the buffer with grant references provided by the backend.
252  *
253  * \param buf shared buffer.
254  * \return zero on success or a negative number on failure.
255  */
256 static int backend_map(struct xen_front_pgdir_shbuf *buf)
257 {
258         struct gnttab_map_grant_ref *map_ops = NULL;
259         unsigned char *ptr;
260         int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
261
262         map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
263         if (!map_ops)
264                 return -ENOMEM;
265
266         buf->backend_map_handles = kcalloc(buf->num_pages,
267                                            sizeof(*buf->backend_map_handles),
268                                            GFP_KERNEL);
269         if (!buf->backend_map_handles) {
270                 kfree(map_ops);
271                 return -ENOMEM;
272         }
273
274         /*
275          * Read the page directory to get grefs from the backend: for a
276          * backend-allocated buffer we only allocate buf->grefs for the page
277          * directory, so buf->num_grefs equals the number of page directory pages.
278          */
279         ptr = buf->directory;
280         grefs_left = buf->num_pages;
281         cur_page = 0;
282         for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
283                 struct xen_page_directory *page_dir =
284                         (struct xen_page_directory *)ptr;
285                 int to_copy = XEN_NUM_GREFS_PER_PAGE;
286
287                 if (to_copy > grefs_left)
288                         to_copy = grefs_left;
289
290                 for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
291                         phys_addr_t addr;
292
293                         addr = xen_page_to_vaddr(buf->pages[cur_page]);
294                         gnttab_set_map_op(&map_ops[cur_page], addr,
295                                           GNTMAP_host_map,
296                                           page_dir->gref[cur_gref],
297                                           buf->xb_dev->otherend_id);
298                         cur_page++;
299                 }
300
301                 grefs_left -= to_copy;
302                 ptr += PAGE_SIZE;
303         }
304         ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
305
306         /* Save handles even on error, so we can unmap. */
307         for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
308                 if (likely(map_ops[cur_page].status == GNTST_okay)) {
309                         buf->backend_map_handles[cur_page] =
310                                 map_ops[cur_page].handle;
311                 } else {
312                         buf->backend_map_handles[cur_page] =
313                                 INVALID_GRANT_HANDLE;
314                         if (!ret)
315                                 ret = -ENXIO;
316                         dev_err(&buf->xb_dev->dev,
317                                 "Failed to map page %d: %d\n",
318                                 cur_page, map_ops[cur_page].status);
319                 }
320         }
321
322         if (ret) {
323                 dev_err(&buf->xb_dev->dev,
324                         "Failed to map grant references, ret %d", ret);
325                 backend_unmap(buf);
326         }
327
328         kfree(map_ops);
329         return ret;
330 }
331
332 /**
333  * Fill page directory with grant references to the pages of the
334  * page directory itself.
335  *
336  * The grant references to the buffer pages are provided by the
337  * backend in this case.
338  *
339  * \param buf shared buffer.
340  */
341 static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
342 {
343         struct xen_page_directory *page_dir;
344         unsigned char *ptr;
345         int i, num_pages_dir;
346
347         ptr = buf->directory;
348         num_pages_dir = get_num_pages_dir(buf);
349
350         /* Fill only grefs for the page directory itself. */
351         for (i = 0; i < num_pages_dir - 1; i++) {
352                 page_dir = (struct xen_page_directory *)ptr;
353
354                 page_dir->gref_dir_next_page = buf->grefs[i + 1];
355                 ptr += PAGE_SIZE;
356         }
357         /* The last page must indicate that there are no more pages. */
358         page_dir = (struct xen_page_directory *)ptr;
359         page_dir->gref_dir_next_page = GRANT_INVALID_REF;
360 }
361
362 /**
363  * Fill page directory with grant references to the pages of the
364  * page directory and the buffer we share with the backend.
365  *
366  * \param buf shared buffer.
367  */
368 static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
369 {
370         unsigned char *ptr;
371         int cur_gref, grefs_left, to_copy, i, num_pages_dir;
372
373         ptr = buf->directory;
374         num_pages_dir = get_num_pages_dir(buf);
375
376         /*
377          * While copying, skip the grefs at the start; they are for
378          * the pages granted to the page directory itself.
379          */
380         cur_gref = num_pages_dir;
381         grefs_left = buf->num_pages;
382         for (i = 0; i < num_pages_dir; i++) {
383                 struct xen_page_directory *page_dir =
384                         (struct xen_page_directory *)ptr;
385
386                 if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
387                         to_copy = grefs_left;
388                         page_dir->gref_dir_next_page = GRANT_INVALID_REF;
389                 } else {
390                         to_copy = XEN_NUM_GREFS_PER_PAGE;
391                         page_dir->gref_dir_next_page = buf->grefs[i + 1];
392                 }
393                 memcpy(&page_dir->gref, &buf->grefs[cur_gref],
394                        to_copy * sizeof(grant_ref_t));
395                 ptr += PAGE_SIZE;
396                 grefs_left -= to_copy;
397                 cur_gref += to_copy;
398         }
399 }
400
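/*
 * Resulting layout of buf->grefs for a frontend-allocated buffer (sketch;
 * the element counts assume 4 KiB pages, e.g. a 2050-page buffer needs
 * DIV_ROUND_UP(2050, 1023) = 3 directory pages and thus 3 + 2050 grefs):
 *
 *   grefs[0 .. num_pages_dir - 1]          - the page directory pages
 *   grefs[num_pages_dir .. num_grefs - 1]  - the buffer data pages
 *
 * grant_references() below fills the array in exactly this order, which
 * is why guest_fill_page_dir() skips the first num_pages_dir entries.
 */
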
401 /**
402  * Grant references to the frontend's buffer pages.
403  *
404  * These will be shared with the backend, so it can
405  * access the buffer's data.
406  *
407  * \param buf shared buffer.
408  * \return zero on success or a negative number on failure.
409  */
410 static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
411                                        grant_ref_t *priv_gref_head,
412                                        int gref_idx)
413 {
414         int i, cur_ref, otherend_id;
415
416         otherend_id = buf->xb_dev->otherend_id;
417         for (i = 0; i < buf->num_pages; i++) {
418                 cur_ref = gnttab_claim_grant_reference(priv_gref_head);
419                 if (cur_ref < 0)
420                         return cur_ref;
421
422                 gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
423                                                 xen_page_to_gfn(buf->pages[i]),
424                                                 0);
425                 buf->grefs[gref_idx++] = cur_ref;
426         }
427         return 0;
428 }
429
430 /**
431  * Grant all the references needed to share the buffer.
432  *
433  * Grant references to the page directory pages and, if
434  * needed, also to the pages of the shared buffer data.
435  *
436  * \param buf shared buffer.
437  * \return zero on success or a negative number on failure.
438  */
439 static int grant_references(struct xen_front_pgdir_shbuf *buf)
440 {
441         grant_ref_t priv_gref_head;
442         int ret, i, j, cur_ref;
443         int otherend_id, num_pages_dir;
444
445         ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
446         if (ret < 0) {
447                 dev_err(&buf->xb_dev->dev,
448                         "Cannot allocate grant references\n");
449                 return ret;
450         }
451
452         otherend_id = buf->xb_dev->otherend_id;
453         j = 0;
454         num_pages_dir = get_num_pages_dir(buf);
455         for (i = 0; i < num_pages_dir; i++) {
456                 unsigned long frame;
457
458                 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
459                 if (cur_ref < 0)
460                         return cur_ref;
461
462                 frame = xen_page_to_gfn(virt_to_page(buf->directory +
463                                                      PAGE_SIZE * i));
464                 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
465                 buf->grefs[j++] = cur_ref;
466         }
467
468         if (buf->ops->grant_refs_for_buffer) {
469                 ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
470                 if (ret)
471                         return ret;
472         }
473
474         gnttab_free_grant_references(priv_gref_head);
475         return 0;
476 }
477
478 /**
479  * Allocate all structures required to manage the shared buffer.
480  *
481  * \param buf shared buffer.
482  * \return zero on success or a negative number on failure.
483  */
484 static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
485 {
486         buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
487         if (!buf->grefs)
488                 return -ENOMEM;
489
490         buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
491         if (!buf->directory)
492                 return -ENOMEM;
493
494         return 0;
495 }
496
497 /*
498  * For backend-allocated buffers we don't need grant_refs_for_buffer,
499  * as those grant references are allocated on the backend side.
500  */
501 static const struct xen_front_pgdir_shbuf_ops backend_ops = {
502         .calc_num_grefs = backend_calc_num_grefs,
503         .fill_page_dir = backend_fill_page_dir,
504         .map = backend_map,
505         .unmap = backend_unmap
506 };
507
508 /*
509  * For locally granted references we do not need to map/unmap
510  * the references.
511  */
512 static const struct xen_front_pgdir_shbuf_ops local_ops = {
513         .calc_num_grefs = guest_calc_num_grefs,
514         .fill_page_dir = guest_fill_page_dir,
515         .grant_refs_for_buffer = guest_grant_refs_for_buffer,
516 };
517
518 /**
519  * Allocate a new instance of a shared buffer.
520  *
521  * \param cfg configuration to be used while allocating a new shared buffer.
522  * \return zero on success or a negative number on failure.
523  */
524 int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
525 {
526         struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
527         int ret;
528
529         if (cfg->be_alloc)
530                 buf->ops = &backend_ops;
531         else
532                 buf->ops = &local_ops;
533         buf->xb_dev = cfg->xb_dev;
534         buf->num_pages = cfg->num_pages;
535         buf->pages = cfg->pages;
536
537         buf->ops->calc_num_grefs(buf);
538
539         ret = alloc_storage(buf);
540         if (ret)
541                 goto fail;
542
543         ret = grant_references(buf);
544         if (ret)
545                 goto fail;
546
547         buf->ops->fill_page_dir(buf);
548
549         return 0;
550
551 fail:
552         xen_front_pgdir_shbuf_free(buf);
553         return ret;
554 }
555 EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
556
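/*
 * Typical frontend usage, as a hedged sketch only: the driver context
 * (info), the way the backing pages are obtained and the publish_gref()
 * helper used to hand the directory gref to the backend are hypothetical
 * and not part of this module's API.
 *
 *	struct xen_front_pgdir_shbuf shbuf = { };
 *	struct xen_front_pgdir_shbuf_cfg cfg = {
 *		.xb_dev = info->xb_dev,
 *		.num_pages = num_pages,
 *		.pages = pages,
 *		.pgdir = &shbuf,
 *		.be_alloc = be_alloc,
 *	};
 *	int ret;
 *
 *	ret = xen_front_pgdir_shbuf_alloc(&cfg);
 *	if (ret < 0)
 *		return ret;
 *
 *	Publish the first directory gref to the backend, e.g. via XenStore:
 *		publish_gref(info, xen_front_pgdir_shbuf_get_dir_start(&shbuf));
 *
 *	For be_alloc buffers, map the backend-provided grefs onto the pages:
 *		ret = xen_front_pgdir_shbuf_map(&shbuf);
 *
 *	Teardown:
 *		xen_front_pgdir_shbuf_unmap(&shbuf);
 *		xen_front_pgdir_shbuf_free(&shbuf);
 */
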
557 MODULE_DESCRIPTION("Xen frontend/backend page directory based "
558                    "shared buffer handling");
559 MODULE_AUTHOR("Oleksandr Andrushchenko");
560 MODULE_LICENSE("GPL");