// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of Gasket page table support.
 *
 * Copyright (C) 2018 Google, Inc.
 */

/*
 * This file assumes 4kB pages throughout; the assumption can be factored out
 * when necessary.
 *
 * There is a configurable number of page table entries, as well as a
 * configurable bit index for the extended address flag. Both of these are
 * specified in gasket_page_table_init through the page_table_config parameter.
 *
 * The following example assumes:
 *   page_table_config->total_entries = 8192
 *   page_table_config->extended_bit = 63
 *
 * Address format:
 * Simple addresses - those whose containing pages are directly placed in the
 * device's address translation registers - are laid out as:
 * [ 63 - 25: 0 | 24 - 12: page index | 11 - 0: page offset ]
 * page index:  The index of the containing page in the device's address
 *              translation registers.
 * page offset: The index of the address into the containing page.
 *
 * Extended addresses - those whose containing pages are contained in a
 * second-level page table whose address is present in the device's address
 * translation registers - are laid out as:
 * [ 63: flag | 62 - 34: 0 | 33 - 21: dev/level 0 index |
 *   20 - 12: host/level 1 index | 11 - 0: page offset ]
 * flag:        Marker indicating that this is an extended address. Always 1.
 * dev index:   The index of the first-level page in the device's extended
 *              address translation registers.
 * host index:  The index of the containing page in the [host-resident] second-
 *              level page table.
 * page offset: The index of the address into the containing [second-level]
 *              page.
 */
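
/*
 * Worked example, purely for illustration, under the example configuration
 * above (8192 entries, extended bit 63); the addresses are made up:
 *
 * Simple address 0x3020:
 *   page offset = 0x3020 & 0xfff = 0x20
 *   page index  = 0x3020 >> 12   = 3
 *   i.e., offset 0x20 into the page held in translation register 3.
 *
 * Extended address 0x8000000000203020:
 *   flag          = bit 63 is set, so this is an extended address
 *   page offset   = 0x203020 & 0xfff         = 0x20
 *   level 1 index = (0x203020 >> 12) & 0x1ff = 3
 *   level 0 index = 0x203020 >> 21           = 1
 *   i.e., offset 0x20 into the page at slot 3 of the subtable addressed by
 *   extended translation register 1.
 */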
#include "gasket_page_table.h"

#include <linux/device.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "gasket_constants.h"
#include "gasket_core.h"

/* Constants & utility macros */
/*
 * The number of pages that can be mapped into each second-level page table:
 * one 4kB page of 8-byte slots, i.e. 4096 / 8 = 512.
 */
#define GASKET_PAGES_PER_SUBTABLE 512

/* The starting position of the page index in a simple virtual address. */
#define GASKET_SIMPLE_PAGE_SHIFT 12

/* Flag indicating that a [device] slot is valid for use. */
#define GASKET_VALID_SLOT_FLAG 1

/*
 * The starting position of the level 0 page index (i.e., the entry in the
 * device's extended address registers) in an extended address.
 * Also can be thought of as (log2(PAGE_SIZE) + log2(PAGES_PER_SUBTABLE)),
 * or (12 + 9).
 */
#define GASKET_EXTENDED_LVL0_SHIFT 21

/*
 * The width in bits of the level 0 page index, i.e.,
 * log2(NUM_LVL0_PAGE_TABLES).
 *
 * At a maximum, this gives a 34-bit (16GB) address space:
 *   GASKET_EXTENDED_LVL0_WIDTH + log2(PAGES_PER_SUBTABLE) + log2(PAGE_SIZE)
 *   = 13 + 9 + 12 = 34
 */
#define GASKET_EXTENDED_LVL0_WIDTH 13

/*
 * The starting position of the level 1 page index (i.e., the entry in the
 * host-resident second-level page table, or "subtable") in an extended
 * address.
 */
#define GASKET_EXTENDED_LVL1_SHIFT 12

/* Type declarations */
/* Valid states for a struct gasket_page_table_entry. */
enum pte_status {
        PTE_FREE,
        PTE_INUSE,
};

/*
 * Mapping metadata for a single page.
 *
 * In this file, host-side page table entries are referred to as PTEs.
 * Where device vs. host entries are differentiated, device-side or -visible
 * entries are called "slots". A slot may be either an entry in the device's
 * address translation table registers or an entry in a second-level page
 * table ("subtable").
 *
 * The full data in this structure is visible on the host [of course]. Only
 * the address contained in dma_addr is communicated to the device; that points
 * to the actual page mapped and described by this structure.
 */
struct gasket_page_table_entry {
        /* The status of this entry/slot: free or in use. */
        enum pte_status status;

        /*
         * Index for alignment into host vaddrs.
         * When a user specifies a host address for a mapping, that address may
         * not be page-aligned. Offset is the index into the containing page of
         * the host address (i.e., host_vaddr & (PAGE_SIZE - 1)).
         * This is necessary for translating between user-specified addresses
         * and page-aligned addresses.
         */
        int offset;

        /* Address of the page in DMA space. */
        dma_addr_t dma_addr;

        /* Linux page descriptor for the page described by this structure. */
        struct page *page;

        /*
         * If this is an extended and first-level entry, sublevel points
         * to the second-level entries underneath this entry.
         */
        struct gasket_page_table_entry *sublevel;
};

/*
 * Maintains the virtual-to-physical address mapping for a coherent page
 * allocated by this module for a given device.
 *
 * Note that the kernel does not track user virtual mappings of coherent
 * pages, and a coherent allocation has no struct page associated with it,
 * so get_user_pages_xx() cannot be performed on a physical address that
 * was allocated coherent. This structure tracks the mapping instead.
 */
struct gasket_coherent_page_entry {
        /* Physical address, DMA'able by the owner device */
        dma_addr_t paddr;

        /* User virtual address that was mapped by the mmap kernel subsystem */
        u64 user_virt;

        /* Kernel virtual address */
        u64 kernel_virt;

        /*
         * Whether this page has been mapped into a user land process virtual
         * space
         */
        u32 in_use;
};
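
/*
 * Example translation, purely for illustration with made-up values: if
 * coherent_pages[0].user_virt == 0x40000000 and
 * coherent_pages[0].paddr == 0x80000000, then the host address 0x40003020,
 * which lies inside the coherent range, maps to the DMA address
 *   0x80000000 + (0x40003020 - 0x40000000) == 0x80003020
 * (see the coherent branch of gasket_perform_mapping() below).
 */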

/*
 * [Host-side] page table descriptor.
 *
 * This structure tracks the metadata necessary to manage both simple and
 * extended page tables.
 */
struct gasket_page_table {
        /* The config used to create this page table. */
        struct gasket_page_table_config config;

        /* The number of simple (single-level) entries in the page table. */
        uint num_simple_entries;

        /* The number of extended (two-level) entries in the page table. */
        uint num_extended_entries;

        /* Array of [host-side] page table entries. */
        struct gasket_page_table_entry *entries;

        /* Number of actively mapped kernel pages in this table. */
        uint num_active_pages;

        /* Device register: base of/first slot in the page table. */
        u64 __iomem *base_slot;

        /*
         * Device register: holds the offset indicating the start of the
         * extended address region of the device's address translation table.
         */
        u64 __iomem *extended_offset_reg;

        /* Device structure for the underlying device. Only used for logging. */
        struct device *device;

        /* PCI system descriptor for the underlying device. */
        struct pci_dev *pci_dev;

        /* Location of the extended address bit for this Gasket device. */
        u64 extended_flag;

        /* Mutex to protect page table internals. */
        struct mutex mutex;

        /* Number of coherent pages accessible via this page table */
        int num_coherent_pages;

        /*
         * List of coherent memory (physical) allocated for a device.
         *
         * This structure also remembers the user virtual mapping; this is
         * hacky, but necessary because the kernel does not keep track of
         * user mappings of coherent (pfn-mapped) pages, nor of the virt to
         * coherent page mapping.
         * TODO: use the find_vma() APIs to convert a host address to a
         * vm_area and then to a dma_addr_t instead of storing the user
         * virtual address in gasket_coherent_page_entry.
         *
         * Note that the user virtual mapping is created by the driver, in
         * the gasket_mmap function, so user_virt belongs in the driver anyhow.
         */
        struct gasket_coherent_page_entry *coherent_pages;
};

/* See gasket_page_table.h for description. */
int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
                           const struct gasket_bar_data *bar_data,
                           const struct gasket_page_table_config *page_table_config,
                           struct device *device, struct pci_dev *pci_dev)
{
        ulong bytes;
        struct gasket_page_table *pg_tbl;
        ulong total_entries = page_table_config->total_entries;

        /*
         * TODO: Verify config->total_entries against value read from the
         * hardware register that contains the page table size.
         */
        if (total_entries == ULONG_MAX) {
                dev_dbg(device,
                        "Error reading page table size. Initializing page table with size 0\n");
                total_entries = 0;
        }

        dev_dbg(device,
                "Attempting to initialize page table of size 0x%lx\n",
                total_entries);

        dev_dbg(device,
                "Table has base reg 0x%x, extended offset reg 0x%x\n",
                page_table_config->base_reg,
                page_table_config->extended_reg);

        *ppg_tbl = kzalloc(sizeof(**ppg_tbl), GFP_KERNEL);
        if (!*ppg_tbl) {
                dev_dbg(device, "No memory for page table\n");
                return -ENOMEM;
        }

        pg_tbl = *ppg_tbl;
        bytes = total_entries * sizeof(struct gasket_page_table_entry);
        if (bytes != 0) {
                pg_tbl->entries = vzalloc(bytes);
                if (!pg_tbl->entries) {
                        dev_dbg(device,
                                "No memory for address translation metadata\n");
                        kfree(pg_tbl);
                        *ppg_tbl = NULL;
                        return -ENOMEM;
                }
        }

        mutex_init(&pg_tbl->mutex);
        memcpy(&pg_tbl->config, page_table_config, sizeof(*page_table_config));
        if (pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_NORMAL ||
            pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_SIMPLE) {
                pg_tbl->num_simple_entries = total_entries;
                pg_tbl->num_extended_entries = 0;
                pg_tbl->extended_flag = 1ull << page_table_config->extended_bit;
        } else {
                pg_tbl->num_simple_entries = 0;
                pg_tbl->num_extended_entries = total_entries;
                pg_tbl->extended_flag = 0;
        }
        pg_tbl->num_active_pages = 0;
        pg_tbl->base_slot =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->base_reg];
        pg_tbl->extended_offset_reg =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->extended_reg];
        pg_tbl->device = get_device(device);
        pg_tbl->pci_dev = pci_dev;

        dev_dbg(device, "Page table initialized successfully\n");

        return 0;
}

/*
 * Check if a range of PTEs is free.
 * The page table mutex must be held by the caller.
 */
static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
                                     uint num_entries)
{
        int i;

        for (i = 0; i < num_entries; i++) {
                if (ptes[i].status != PTE_FREE)
                        return false;
        }

        return true;
}

/*
 * Free a second level page [sub]table.
 * The page table mutex must be held before this call.
 */
static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        /* Release the page table from the driver */
        pte->status = PTE_FREE;

        /* Release the page table from the device */
        writeq(0, slot);

        if (pte->dma_addr)
                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
                               DMA_TO_DEVICE);

        vfree(pte->sublevel);

        if (pte->page)
                free_page((ulong)page_address(pte->page));

        memset(pte, 0, sizeof(struct gasket_page_table_entry));
}

/*
 * Actually perform collection.
 * The page table mutex must be held by the caller.
 */
static void
gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
{
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        /*
         * XXX FIX ME XXX -- more efficient to keep a usage count
         * rather than scanning the second level page tables
         */
        for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
             slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
             pte < pg_tbl->entries + pg_tbl->config.total_entries;
             pte++, slot++) {
                if (pte->status == PTE_INUSE) {
                        if (gasket_is_pte_range_free(pte->sublevel,
                                                     GASKET_PAGES_PER_SUBTABLE))
                                gasket_free_extended_subtable(pg_tbl, pte,
                                                              slot);
                }
        }
}

/* See gasket_page_table.h for description. */
void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_garbage_collect_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}

/* See gasket_page_table.h for description. */
void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
{
        /* Deallocate free second-level tables. */
        gasket_page_table_garbage_collect(pg_tbl);

        /* TODO: Check that all PTEs have been freed? */

        vfree(pg_tbl->entries);
        pg_tbl->entries = NULL;

        put_device(pg_tbl->device);
        kfree(pg_tbl);
}

/* See gasket_page_table.h for description. */
int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
                                uint num_simple_entries)
{
        int i, start;

        mutex_lock(&pg_tbl->mutex);
        if (num_simple_entries > pg_tbl->config.total_entries) {
                mutex_unlock(&pg_tbl->mutex);
                return -EINVAL;
        }

        gasket_page_table_garbage_collect_nolock(pg_tbl);

        start = min(pg_tbl->num_simple_entries, num_simple_entries);

        for (i = start; i < pg_tbl->config.total_entries; i++) {
                if (pg_tbl->entries[i].status != PTE_FREE) {
                        dev_err(pg_tbl->device, "entry %d is not free\n", i);
                        mutex_unlock(&pg_tbl->mutex);
                        return -EBUSY;
                }
        }

        pg_tbl->num_simple_entries = num_simple_entries;
        pg_tbl->num_extended_entries =
                pg_tbl->config.total_entries - num_simple_entries;
        writeq(num_simple_entries, pg_tbl->extended_offset_reg);

        mutex_unlock(&pg_tbl->mutex);
        return 0;
}
EXPORT_SYMBOL(gasket_page_table_partition);

/*
 * Return whether a host buffer was mapped as coherent memory.
 *
 * A Gasket page table currently supports one contiguous DMA range, mapped to
 * one contiguous virtual memory range. Check if the host_addr is within that
 * range.
 */
static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
{
        u64 min, max;

        /* whether the host address is within the user virt range */
        if (!pg_tbl->coherent_pages)
                return 0;

        min = (u64)pg_tbl->coherent_pages[0].user_virt;
        max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;

        return min <= host_addr && host_addr < max;
}

/* Safely return a page to the OS. */
static bool gasket_release_page(struct page *page)
{
        if (!page)
                return false;

        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);

        return true;
}

/*
 * Get and map last level page table buffers.
 *
 * slots is the location(s) to write the device-mapped page address(es). If
 * this is a simple mapping, these will be address translation registers. If
 * this is an extended mapping, these will be within a second-level page table
 * allocated by the host and so must have their __iomem attribute cast away.
 */
static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
                                  struct gasket_page_table_entry *ptes,
                                  u64 __iomem *slots, ulong host_addr,
                                  uint num_pages, int is_simple_mapping)
{
        int ret;
        ulong offset;
        struct page *page;
        dma_addr_t dma_addr;
        ulong page_addr;
        int i;

        for (i = 0; i < num_pages; i++) {
                page_addr = host_addr + i * PAGE_SIZE;
                offset = page_addr & (PAGE_SIZE - 1);
                if (is_coherent(pg_tbl, host_addr)) {
                        u64 off =
                                (u64)host_addr -
                                (u64)pg_tbl->coherent_pages[0].user_virt;
                        ptes[i].page = NULL;
                        ptes[i].offset = offset;
                        ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
                                           off + i * PAGE_SIZE;
                } else {
                        ret = get_user_pages_fast(page_addr - offset, 1,
                                                  FOLL_WRITE, &page);

                        if (ret <= 0) {
                                dev_err(pg_tbl->device,
                                        "get user pages failed for addr=0x%lx, offset=0x%lx [ret=%d]\n",
                                        page_addr, offset, ret);
                                return ret ? ret : -ENOMEM;
                        }
                        ++pg_tbl->num_active_pages;

                        ptes[i].page = page;
                        ptes[i].offset = offset;

                        /* Map the page into DMA space. */
                        ptes[i].dma_addr =
                                dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(pg_tbl->device,
                                              ptes[i].dma_addr)) {
                                if (gasket_release_page(ptes[i].page))
                                        --pg_tbl->num_active_pages;

                                memset(&ptes[i], 0,
                                       sizeof(struct gasket_page_table_entry));
                                return -EINVAL;
                        }
                }

                /* Make the DMA-space address available to the device. */
                dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;

                if (is_simple_mapping) {
                        writeq(dma_addr, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = dma_addr;
                        /*
                         * Extended page table vectors are in DRAM,
                         * and so need to be synced each time they are updated.
                         */
                        dma_map_single(pg_tbl->device,
                                       (void *)&((u64 __force *)slots)[i],
                                       sizeof(u64), DMA_TO_DEVICE);
                }
                ptes[i].status = PTE_INUSE;
        }
        return 0;
}
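
/*
 * For illustration (made-up values): a page DMA-mapped at 0x12345000 for a
 * host address with page offset 0x20 yields the slot value
 *   (0x12345000 + 0x20) | GASKET_VALID_SLOT_FLAG == 0x12345021,
 * which is what the device uses when translating the address.
 */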

/*
 * Return the index of the page for the address in the simple table.
 * Does not perform validity checking.
 */
static int gasket_simple_page_idx(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr)
{
        return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 0 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl0_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 1 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl1_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
               (GASKET_PAGES_PER_SUBTABLE - 1);
}
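
/*
 * For illustration, with the example 8192-entry configuration from the top
 * of this file, the extended address 0x8000000000203020 decomposes as:
 *   gasket_extended_lvl0_page_idx(): (0x203020 >> 21) & 8191 == 1
 *   gasket_extended_lvl1_page_idx(): (0x203020 >> 12) & 511  == 3
 * and for the simple address 0x3020:
 *   gasket_simple_page_idx():        (0x3020 >> 12) & 8191   == 3
 */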

/*
 * Allocate page table entries in a simple table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, uint num_pages)
{
        if (!gasket_is_pte_range_free(pg_tbl->entries +
                                      gasket_simple_page_idx(pg_tbl, dev_addr),
                                      num_pages))
                return -EBUSY;

        return 0;
}

/*
 * Unmap and release mapped pages.
 * The page table mutex must be held by the caller.
 */
static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
                                     struct gasket_page_table_entry *ptes,
                                     u64 __iomem *slots, uint num_pages,
                                     int is_simple_mapping)
{
        int i;
        /*
         * For each page table entry and corresponding entry in the device's
         * address translation table:
         */
        for (i = 0; i < num_pages; i++) {
                /* release the address from the device, */
                if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = 0;
                        /* sync above PTE update before updating mappings */
                        wmb();
                }

                /* release the address from the driver, */
                if (ptes[i].status == PTE_INUSE) {
                        if (ptes[i].page && ptes[i].dma_addr) {
                                dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                        }
                        if (gasket_release_page(ptes[i].page))
                                --pg_tbl->num_active_pages;
                }

                /* and clear the PTE. */
                memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
        }
}

/*
 * Unmap and release pages mapped to simple addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_simple_pages(struct gasket_page_table *pg_tbl,
                                      ulong dev_addr, uint num_pages)
{
        uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);

        gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
                                 pg_tbl->base_slot + slot, num_pages, 1);
}

/*
 * Unmap and release buffers mapped to extended addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
                                        ulong dev_addr, uint num_pages)
{
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                /* TODO: Add check to ensure pte remains valid? */
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                if (pte->status == PTE_INUSE) {
                        slot_base = (u64 __iomem *)(page_address(pte->page) +
                                                    pte->offset);
                        gasket_perform_unmapping(pg_tbl,
                                                 pte->sublevel + slot_idx,
                                                 slot_base + slot_idx, len, 0);
                }

                remain -= len;
                slot_idx = 0;
                pte++;
        }
}

/* Evaluates to true if the specified device address is simple. */
static inline bool gasket_addr_is_simple(struct gasket_page_table *pg_tbl,
                                         ulong addr)
{
        return !(addr & pg_tbl->extended_flag);
}

/*
 * Convert (simple, page, offset) into a device address.
 * Examples (with extended_bit = 63, as in the example configuration above):
 * Simple page 0, offset 32:
 *  Input (1, 0, 32), Output 0x20
 * Simple page 1000, offset 511:
 *  Input (1, 1000, 511), Output 0x3E81FF
 * Extended page 0, offset 32:
 *  Input (0, 0, 32), Output 0x8000000000000020
 * Extended page 1000, offset 511:
 *  Input (0, 1000, 511), Output 0x80000000003E81FF
 */
static ulong gasket_components_to_dev_address(struct gasket_page_table *pg_tbl,
                                              int is_simple, uint page_index,
                                              uint offset)
{
        ulong dev_addr = (page_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;

        return is_simple ? dev_addr : (pg_tbl->extended_flag | dev_addr);
}

/*
 * Validity checking for simple addresses.
 *
 * Verify that address translation commutes (from address to/from page + offset)
 * and that the requested page range starts and ends within the set of
 * currently-partitioned simple pages.
 */
static bool gasket_is_simple_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                          ulong dev_addr, uint num_pages)
{
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_index =
                (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);

        if (gasket_components_to_dev_address(pg_tbl, 1, page_index,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
                        dev_addr);
                return true;
        }

        if (page_index >= pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "starting slot at %lu is too large, max is < %u\n",
                        page_index, pg_tbl->num_simple_entries);
                return true;
        }

        if (page_index + num_pages > pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "ending slot at %lu is too large, max is <= %u\n",
                        page_index + num_pages, pg_tbl->num_simple_entries);
                return true;
        }

        return false;
}

/*
 * Validity checking for extended addresses.
 *
 * Verify that address translation commutes (from address to/from page +
 * offset) and that the requested page range starts and ends within the set of
 * currently-partitioned extended pages.
 */
static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                            ulong dev_addr, uint num_pages)
{
        /* Starting byte index of dev_addr into the first mapped page */
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_global_idx, page_lvl0_idx;
        ulong num_lvl0_pages;
        ulong addr;

        /* check if the device address is out of bounds */
        addr = dev_addr & ~pg_tbl->extended_flag;
        if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
                dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
                        dev_addr);
                return true;
        }

        /* Find the starting sub-page index in the space of all sub-pages. */
        page_global_idx = (dev_addr / PAGE_SIZE) &
                (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);

        /* Find the starting level 0 index. */
        page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        /* Get the count of affected level 0 pages. */
        num_lvl0_pages = DIV_ROUND_UP(num_pages, GASKET_PAGES_PER_SUBTABLE);

        if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
                        dev_addr);
                return true;
        }

        if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "starting level 0 slot at %lu is too large, max is < %u\n",
                        page_lvl0_idx, pg_tbl->num_extended_entries);
                return true;
        }

        if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "ending level 0 slot at %lu is too large, max is <= %u\n",
                        page_lvl0_idx + num_lvl0_pages,
                        pg_tbl->num_extended_entries);
                return true;
        }

        return false;
}
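
/*
 * For illustration: with GASKET_EXTENDED_LVL0_WIDTH +
 * GASKET_EXTENDED_LVL0_SHIFT == 34, the non-flag address bits may only span
 * 34 bits. A made-up dev_addr of 0x8000000400000000 strips (with extended
 * bit 63) to 0x400000000 == 1 << 34, so the shift above is nonzero and the
 * address is rejected as out of bounds.
 */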

/*
 * Non-locking entry to unmapping routines.
 * The page table mutex must be held by the caller.
 */
static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr, uint num_pages)
{
        if (!num_pages)
                return;

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
        else
                gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
}

/*
 * Allocate and map pages to simple addresses.
 * If there is an error, no pages are mapped.
 */
static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
                                   ulong host_addr, ulong dev_addr,
                                   uint num_pages)
{
        int ret;
        uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);

        ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_err(pg_tbl->device,
                        "page table slots %u (@ 0x%lx) to %u are not available\n",
                        slot_idx, dev_addr, slot_idx + num_pages - 1);
                return ret;
        }

        ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
                                     pg_tbl->base_slot + slot_idx, host_addr,
                                     num_pages, 1);

        if (ret) {
                gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
                dev_err(pg_tbl->device, "gasket_perform_mapping failed: %d\n",
                        ret);
        }
        return ret;
}

/*
 * Allocate a second level page table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        ulong page_addr, subtable_bytes;
        dma_addr_t dma_addr;

        /* XXX FIX ME XXX this is inefficient for non-4K page sizes */

        /*
         * The GFP_DMA flag must be passed to architectures for which
         * part of the memory range is not considered DMA'able.
         * This seems to be the case for the Juno board with the 4.5.0
         * Linaro kernel.
         */
        page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page_addr)
                return -ENOMEM;
        pte->page = virt_to_page((void *)page_addr);
        pte->offset = 0;

        subtable_bytes = sizeof(struct gasket_page_table_entry) *
                GASKET_PAGES_PER_SUBTABLE;
        pte->sublevel = vzalloc(subtable_bytes);
        if (!pte->sublevel) {
                free_page(page_addr);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* Map the page into DMA space. */
        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(pg_tbl->device, pte->dma_addr)) {
                free_page(page_addr);
                vfree(pte->sublevel);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* make the addresses available to the device */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
        writeq(dma_addr, slot);

        pte->status = PTE_INUSE;

        return 0;
}

/*
 * Allocate slots in an extended page table.  Check to see if a range of page
 * table slots is available. If necessary, memory is allocated for second level
 * page tables.
 *
 * Note that memory for second level page tables is allocated as needed, but
 * that memory is only freed on the final close of the device file, when the
 * page tables are repartitioned, or when the device is removed.  If there is
 * an error or if the full range of slots is not available, any memory
 * allocated for second level page tables remains allocated until final close,
 * repartition, or device removal.
 *
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
                                         ulong dev_addr, uint num_entries)
{
        int ret = 0;
        uint remain, subtable_slot_idx, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        remain = num_entries;
        subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
        slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
               gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain,
                          GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);

                if (pte->status == PTE_FREE) {
                        ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
                        if (ret) {
                                dev_err(pg_tbl->device,
                                        "no memory for extended addr subtable\n");
                                return ret;
                        }
                } else {
                        if (!gasket_is_pte_range_free(pte->sublevel +
                                                      subtable_slot_idx, len))
                                return -EBUSY;
                }

                remain -= len;
                subtable_slot_idx = 0;
                pte++;
                slot++;
        }

        return 0;
}

/*
 * gasket_map_extended_pages - Get and map buffers to extended addresses.
 * If there is an error, no pages are mapped.
 */
static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     uint num_pages)
{
        int ret;
        ulong dev_addr_end;
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_addr_end = dev_addr + num_pages * PAGE_SIZE - 1;
                dev_err(pg_tbl->device,
                        "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are not available\n",
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
                        dev_addr,
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
                return ret;
        }

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                slot_base =
                        (u64 __iomem *)(page_address(pte->page) + pte->offset);
                ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
                                             slot_base + slot_idx, host_addr,
                                             len, 0);
                if (ret) {
                        gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
                                                       num_pages);
                        return ret;
                }

                remain -= len;
                slot_idx = 0;
                pte++;
                host_addr += len * PAGE_SIZE;
        }

        return 0;
}

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_map calls either gasket_map_simple_pages() or
 * gasket_map_extended_pages() to actually perform the mapping.
 *
 * The page table mutex is held for the entire operation.
 */
int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
                          ulong dev_addr, uint num_pages)
{
        int ret;

        if (!num_pages)
                return 0;

        mutex_lock(&pg_tbl->mutex);

        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
                                              num_pages);
        } else {
                ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
                                                num_pages);
        }

        mutex_unlock(&pg_tbl->mutex);
        return ret;
}
EXPORT_SYMBOL(gasket_page_table_map);
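
/*
 * Example usage (a sketch only; error handling and the surrounding driver
 * context are elided, and buf, dev_addr and npages are hypothetical; buf is
 * assumed page-aligned):
 *
 *      if (gasket_page_table_are_addrs_bad(pg_tbl, buf, dev_addr,
 *                                          npages * PAGE_SIZE))
 *              return -EINVAL;
 *      ret = gasket_page_table_map(pg_tbl, buf, dev_addr, npages);
 *      if (ret)
 *              return ret;
 *      ...
 *      gasket_page_table_unmap(pg_tbl, dev_addr, npages);
 */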

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_unmap takes the page table lock and calls either
 * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
 * actually unmap the pages from device space.
 *
 * The page table mutex is held for the entire operation.
 */
void gasket_page_table_unmap(struct gasket_page_table *pg_tbl, ulong dev_addr,
                             uint num_pages)
{
        if (!num_pages)
                return;

        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap);

static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
{
        gasket_unmap_simple_pages(pg_tbl,
                                  gasket_components_to_dev_address(pg_tbl, 1, 0,
                                                                   0),
                                  pg_tbl->num_simple_entries);
        gasket_unmap_extended_pages(pg_tbl,
                                    gasket_components_to_dev_address(pg_tbl, 0,
                                                                     0, 0),
                                    pg_tbl->num_extended_entries *
                                    GASKET_PAGES_PER_SUBTABLE);
}

/* See gasket_page_table.h for description. */
void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap_all);

/* See gasket_page_table.h for description. */
void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
        mutex_unlock(&pg_tbl->mutex);
}

/* See gasket_page_table.h for description. */
int gasket_page_table_lookup_page(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr, struct page **ppage,
                                  ulong *poffset)
{
        uint page_num;
        struct gasket_page_table_entry *pte;

        mutex_lock(&pg_tbl->mutex);
        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_simple_entries)
                        goto fail;

                pte = pg_tbl->entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        } else {
                /* Find the level 0 entry, */
                page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_extended_entries)
                        goto fail;

                pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;

                /* and its contained level 1 entry. */
                page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
                pte = pte->sublevel + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        }

        *ppage = pte->page;
        *poffset = pte->offset;
        mutex_unlock(&pg_tbl->mutex);
        return 0;

fail:
        *ppage = NULL;
        *poffset = 0;
        mutex_unlock(&pg_tbl->mutex);
        return -EINVAL;
}

/* See gasket_page_table.h for description. */
bool gasket_page_table_are_addrs_bad(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     ulong bytes)
{
        if (host_addr & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "host mapping address 0x%lx must be page aligned\n",
                        host_addr);
                return true;
        }

        return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
}
EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);

/* See gasket_page_table.h for description. */
bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, ulong bytes)
{
        uint num_pages = bytes / PAGE_SIZE;

        if (bytes & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "mapping size 0x%lX must be page aligned\n", bytes);
                return true;
        }

        if (num_pages == 0) {
                dev_err(pg_tbl->device,
                        "requested mapping is less than one page: %lu / %lu\n",
                        bytes, PAGE_SIZE);
                return true;
        }

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                return gasket_is_simple_dev_addr_bad(pg_tbl, dev_addr,
                                                     num_pages);
        return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
}
EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);

/* See gasket_page_table.h for description. */
uint gasket_page_table_max_size(struct gasket_page_table *page_table)
{
        if (!page_table)
                return 0;
        return page_table->config.total_entries;
}
EXPORT_SYMBOL(gasket_page_table_max_size);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_entries);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_simple_entries);

/* See gasket_page_table.h for description. */
uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_active_pages;
}
EXPORT_SYMBOL(gasket_page_table_num_active_pages);

/* See gasket_page_table.h */
int gasket_page_table_system_status(struct gasket_page_table *page_table)
{
        if (!page_table)
                return GASKET_STATUS_LAMED;

        if (gasket_page_table_num_entries(page_table) == 0) {
                dev_dbg(page_table->device, "Page table size is 0\n");
                return GASKET_STATUS_LAMED;
        }

        return GASKET_STATUS_ALIVE;
}

/* Record the host_addr to coherent DMA memory mapping. */
int gasket_set_user_virt(struct gasket_dev *gasket_dev, u64 size,
                         dma_addr_t dma_address, ulong vma)
{
        int j;
        struct gasket_page_table *pg_tbl;
        unsigned int num_pages = size / PAGE_SIZE;

        /*
         * TODO: for future chipsets, better handling of the case where
         * multiple page tables are supported on a given device
         */
        pg_tbl = gasket_dev->page_table[0];
        if (!pg_tbl) {
                dev_dbg(gasket_dev->dev, "%s: invalid page table index\n",
                        __func__);
                return 0;
        }
        for (j = 0; j < num_pages; j++) {
                pg_tbl->coherent_pages[j].user_virt =
                        (u64)vma + j * PAGE_SIZE;
        }
        return 0;
}

/* Allocate a block of coherent memory. */
int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                 dma_addr_t *dma_address, u64 index)
{
        dma_addr_t handle;
        void *mem;
        int j;
        unsigned int num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        const struct gasket_driver_desc *driver_desc =
                gasket_get_driver_desc(gasket_dev);

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        if (num_pages == 0)
                return -EINVAL;

        mem = dma_alloc_coherent(gasket_get_device(gasket_dev),
                                 num_pages * PAGE_SIZE, &handle, GFP_KERNEL);
        if (!mem)
                goto nomem;

        gasket_dev->page_table[index]->num_coherent_pages = num_pages;

        /* allocate the physical memory block */
        gasket_dev->page_table[index]->coherent_pages =
                kcalloc(num_pages,
                        sizeof(*gasket_dev->page_table[index]->coherent_pages),
                        GFP_KERNEL);
        if (!gasket_dev->page_table[index]->coherent_pages)
                goto nomem;

        gasket_dev->coherent_buffer.length_bytes = num_pages * PAGE_SIZE;
        gasket_dev->coherent_buffer.phys_base = handle;
        gasket_dev->coherent_buffer.virt_base = mem;

        *dma_address = driver_desc->coherent_buffer_description.base;
        for (j = 0; j < num_pages; j++) {
                gasket_dev->page_table[index]->coherent_pages[j].paddr =
                        handle + j * PAGE_SIZE;
                gasket_dev->page_table[index]->coherent_pages[j].kernel_virt =
                        (u64)mem + j * PAGE_SIZE;
        }

        return 0;

nomem:
        if (mem) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  num_pages * PAGE_SIZE, mem, handle);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;
        return -ENOMEM;
}

/* Free a block of coherent memory. */
int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                dma_addr_t dma_address, u64 index)
{
        const struct gasket_driver_desc *driver_desc;

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        driver_desc = gasket_get_driver_desc(gasket_dev);

        if (driver_desc->coherent_buffer_description.base != dma_address)
                return -EADDRNOTAVAIL;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;

        return 0;
}

/* Release all coherent memory. */
void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev, u64 index)
{
        if (!gasket_dev->page_table[index])
                return;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }
}