powerpc/iommu: Stop using @current in mm_iommu_xxx
drivers/vfio/vfio_iommu_spapr_tce.c (linux-2.6-microblaze.git)
/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group);

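/*
 * Helpers to update the locked-pages accounting of the current process
 * against RLIMIT_MEMLOCK when memory is pinned for, or released from, DMA.
 */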
static long try_increment_locked_vm(long npages)
{
        long ret = 0, locked, lock_limit;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        if (!npages)
                return 0;

        down_write(&current->mm->mmap_sem);
        locked = current->mm->locked_vm + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                ret = -ENOMEM;
        else
                current->mm->locked_vm += npages;

        pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
                        npages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}

static void decrement_locked_vm(long npages)
{
        if (!current || !current->mm || !npages)
                return; /* process exited */

        down_write(&current->mm->mmap_sem);
        if (WARN_ON_ONCE(npages > current->mm->locked_vm))
                npages = current->mm->locked_vm;
        current->mm->locked_vm -= npages;
        pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
                        npages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&current->mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
        struct list_head next;
        struct iommu_group *grp;
};

/*
 * The API does not supply the container with an IOMMU group at the moment
 * of initialization; groups are attached later and tracked in @group_list.
 */
struct tce_container {
        struct mutex lock;
        bool enabled;
        bool v2;
        unsigned long locked_pages;
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;
};

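/*
 * Handles VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: finds the preregistered
 * memory chunk in the current process and drops a reference to it.
 */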
static long tce_iommu_unregister_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        struct mm_iommu_table_group_mem_t *mem;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
                return -EINVAL;

        mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
        if (!mem)
                return -ENOENT;

        return mm_iommu_put(current->mm, mem);
}

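/*
 * Handles VFIO_IOMMU_SPAPR_REGISTER_MEMORY: preregisters a chunk of
 * userspace memory with mm_iommu_get() so the v2 mapping path can later
 * translate it without pinning pages at map time, and marks the container
 * as enabled.
 */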
static long tce_iommu_register_pages(struct tce_container *container,
                __u64 vaddr, __u64 size)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem = NULL;
        unsigned long entries = size >> PAGE_SHIFT;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
                        ((vaddr + size) < vaddr))
                return -EINVAL;

        ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
        if (ret)
                return ret;

        container->enabled = true;

        return 0;
}

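/*
 * Allocates the per-entry cache of userspace addresses used by the v2
 * IOMMU so that clearing a TCE can find the preregistered memory chunk it
 * came from; the allocation is charged to the locked memory limit.
 */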
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);
        unsigned long *uas;
        long ret;

        BUG_ON(tbl->it_userspace);

        ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
        if (ret)
                return ret;

        uas = vzalloc(cb);
        if (!uas) {
                decrement_locked_vm(cb >> PAGE_SHIFT);
                return -ENOMEM;
        }
        tbl->it_userspace = uas;

        return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
        unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
                        tbl->it_size, PAGE_SIZE);

        if (!tbl->it_userspace)
                return;

        vfree(tbl->it_userspace);
        tbl->it_userspace = NULL;
        decrement_locked_vm(cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
        /*
         * Check that the TCE table granularity is not bigger than the size of
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk than it should.
         */
        return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
        return !list_empty(&container->group_list);
}

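/*
 * Returns the index of the container's DMA window covering @ioba and stores
 * the matching iommu_table in @ptbl, or -1 if no window covers that address.
 */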
static long tce_iommu_find_table(struct tce_container *container,
                phys_addr_t ioba, struct iommu_table **ptbl)
{
        long i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (tbl) {
                        unsigned long entry = ioba >> tbl->it_page_shift;
                        unsigned long start = tbl->it_offset;
                        unsigned long end = start + tbl->it_size;

                        if ((start <= entry) && (entry < end)) {
                                *ptbl = tbl;
                                return i;
                        }
                }
        }

        return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                if (!container->tables[i])
                        return i;
        }

        return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
        int ret = 0;
        unsigned long locked;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp;

        if (!current->mm)
                return -ESRCH; /* process exited */

        if (container->enabled)
                return -EBUSY;

        /*
         * When userspace pages are mapped into the IOMMU, they are effectively
         * locked memory, so, theoretically, we need to update the accounting
         * of locked pages on each map and unmap.  For powerpc, the map/unmap
         * paths can be very hot, though, and the accounting would kill
         * performance, especially since it would be difficult or impossible
         * to handle the accounting in real mode only.
         *
         * To address that, rather than precisely accounting every page, we
         * instead account for a worst case on locked memory when the IOMMU is
         * enabled and disabled.  The worst case upper bound on locked memory
         * is the size of the whole IOMMU window, which is usually relatively
         * small (compared to total memory sizes) on POWER hardware.
         *
         * Also, we don't have a nice way to fail an H_PUT_TCE due to ulimits;
         * that would effectively kill the guest at random points, so it is
         * much better to enforce the limit based on the maximum that the
         * guest can map.
         *
         * Unfortunately, at the moment this counts whole tables, no matter
         * how much memory the guest actually has.  I.e. for a 4GB guest and
         * 4 IOMMU groups, each with a 2GB DMA window, 8GB will be counted
         * here.  The reason is that we cannot tell at this point how much RAM
         * the guest uses, as this information is only available from KVM and
         * VFIO is KVM agnostic.
         *
         * So we do not allow enabling a container without a group attached
         * as there is no way to know how much we should increment
         * the locked_vm counter.
         */
        if (!tce_groups_attached(container))
                return -ENODEV;

        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -ENODEV;

        if (!table_group->tce32_size)
                return -EPERM;

        locked = table_group->tce32_size >> PAGE_SHIFT;
        ret = try_increment_locked_vm(locked);
        if (ret)
                return ret;

        container->locked_pages = locked;

        container->enabled = true;

        return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
        if (!container->enabled)
                return;

        container->enabled = false;

        if (!current->mm)
                return;

        decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
        struct tce_container *container;

        if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
                pr_err("tce_vfio: Wrong IOMMU type\n");
                return ERR_PTR(-EINVAL);
        }

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return ERR_PTR(-ENOMEM);

        mutex_init(&container->lock);
        INIT_LIST_HEAD_RCU(&container->group_list);

        container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

        return container;
}

static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
        struct tce_container *container = iommu_data;
        struct tce_iommu_group *tcegrp;
        long i;

        while (tce_groups_attached(container)) {
                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                tce_iommu_detach_group(iommu_data, tcegrp->grp);
        }

        /*
         * If VFIO created a table, it was not disposed of
         * by tce_iommu_detach_group(), so do it now.
         */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_free_table(tbl);
        }

        tce_iommu_disable(container);
        mutex_destroy(&container->lock);

        kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
                unsigned long hpa)
{
        struct page *page;

        page = pfn_to_page(hpa >> PAGE_SHIFT);
        put_page(page);
}

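/*
 * Translates a userspace address backed by preregistered (v2) memory into
 * a host physical address and returns the descriptor of the memory chunk
 * it belongs to.
 */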
static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
                unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
        long ret = 0;
        struct mm_iommu_table_group_mem_t *mem;

        mem = mm_iommu_lookup(current->mm, tce, size);
        if (!mem)
                return -EINVAL;

        ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
        if (ret)
                return -EINVAL;

        *pmem = mem;

        return 0;
}

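/*
 * Undoes a v2 mapping of a single TCE: finds the preregistered memory
 * chunk via the cached userspace address for @entry, decrements its
 * "mapped" counter and clears the cache slot.
 */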
static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
                unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        int ret;
        unsigned long hpa = 0;
        unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua || !current || !current->mm)
                return;

        ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
                        &hpa, &mem);
        if (ret)
                pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
                                __func__, *pua, entry, ret);
        if (mem)
                mm_iommu_mapped_dec(mem);

        *pua = 0;
}

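/*
 * Clears @pages TCE entries starting at @entry and releases whatever was
 * mapped there: pinned pages for v1 or preregistered memory references
 * for v2.
 */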
static int tce_iommu_clear(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long pages)
{
        unsigned long oldhpa;
        long ret;
        enum dma_data_direction direction;

        for ( ; pages; --pages, ++entry) {
                direction = DMA_NONE;
                oldhpa = 0;
                ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
                if (ret)
                        continue;

                if (direction == DMA_NONE)
                        continue;

                if (container->v2) {
                        tce_iommu_unuse_page_v2(tbl, entry);
                        continue;
                }

                tce_iommu_unuse_page(container, oldhpa);
        }

        return 0;
}

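/*
 * Pins the userspace page backing @tce with get_user_pages_fast() and
 * returns its host physical address in @hpa.
 */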
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
        struct page *page = NULL;
        enum dma_data_direction direction = iommu_tce_direction(tce);

        if (get_user_pages_fast(tce & PAGE_MASK, 1,
                        direction != DMA_TO_DEVICE, &page) != 1)
                return -EFAULT;

        *hpa = __pa((unsigned long) page_address(page));

        return 0;
}

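/*
 * Maps @pages TCEs starting at @entry for the v1 flow: each userspace page
 * is pinned, checked to cover the whole IOMMU page and programmed into the
 * table; on error everything mapped so far is torn down.
 */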
static long tce_iommu_build(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

                ret = tce_iommu_use_page(tce, &hpa);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                hpa |= offset;
                dirtmp = direction;
                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        tce_iommu_unuse_page(container, hpa);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page(container, hpa);

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

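/*
 * Same as tce_iommu_build() but for the v2 flow: the pages must have been
 * preregistered, so instead of pinning them here the userspace addresses
 * are translated via the preregistration cache and the chunk's "mapped"
 * counter is incremented.
 */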
static long tce_iommu_build_v2(struct tce_container *container,
                struct iommu_table *tbl,
                unsigned long entry, unsigned long tce, unsigned long pages,
                enum dma_data_direction direction)
{
        long i, ret = 0;
        struct page *page;
        unsigned long hpa;
        enum dma_data_direction dirtmp;

        for (i = 0; i < pages; ++i) {
                struct mm_iommu_table_group_mem_t *mem = NULL;
                unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
                                entry + i);

                ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
                                &hpa, &mem);
                if (ret)
                        break;

                page = pfn_to_page(hpa >> PAGE_SHIFT);
                if (!tce_page_is_contained(page, tbl->it_page_shift)) {
                        ret = -EPERM;
                        break;
                }

                /* Preserve offset within IOMMU page */
                hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
                dirtmp = direction;

                /* The registered region is being unregistered */
                if (mm_iommu_mapped_inc(mem))
                        break;

                ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
                if (ret) {
                        /* dirtmp cannot be DMA_NONE here */
                        tce_iommu_unuse_page_v2(tbl, entry + i);
                        pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
                                        __func__, entry << tbl->it_page_shift,
                                        tce, ret);
                        break;
                }

                if (dirtmp != DMA_NONE)
                        tce_iommu_unuse_page_v2(tbl, entry + i);

                *pua = tce;

                tce += IOMMU_PAGE_SIZE(tbl);
        }

        if (ret)
                tce_iommu_clear(container, tbl, entry, i);

        return ret;
}

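/*
 * Allocates a hardware TCE table via the platform callbacks and charges
 * its size (plus the userspace view for v2 containers) against the locked
 * memory limit.
 */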
static long tce_iommu_create_table(struct tce_container *container,
                        struct iommu_table_group *table_group,
                        int num,
                        __u32 page_shift,
                        __u64 window_size,
                        __u32 levels,
                        struct iommu_table **ptbl)
{
        long ret, table_size;

        table_size = table_group->ops->get_table_size(page_shift, window_size,
                        levels);
        if (!table_size)
                return -EINVAL;

        ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
        if (ret)
                return ret;

        ret = table_group->ops->create_table(table_group, num,
                        page_shift, window_size, levels, ptbl);

        WARN_ON(!ret && !(*ptbl)->it_ops->free);
        WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

        if (!ret && container->v2) {
                ret = tce_iommu_userspace_view_alloc(*ptbl);
                if (ret)
                        (*ptbl)->it_ops->free(*ptbl);
        }

        if (ret)
                decrement_locked_vm(table_size >> PAGE_SHIFT);

        return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
        unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

        tce_iommu_userspace_view_free(tbl);
        tbl->it_ops->free(tbl);
        decrement_locked_vm(pages);
}

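/*
 * Creates a DMA window (VFIO_IOMMU_SPAPR_TCE_CREATE): allocates a TCE table
 * in a free slot and programs it into every attached group; the bus address
 * chosen by the platform is returned in @start_addr.
 */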
static long tce_iommu_create_window(struct tce_container *container,
                __u32 page_shift, __u64 window_size, __u32 levels,
                __u64 *start_addr)
{
        struct tce_iommu_group *tcegrp;
        struct iommu_table_group *table_group;
        struct iommu_table *tbl = NULL;
        long ret, num;

        num = tce_iommu_find_free_table(container);
        if (num < 0)
                return num;

        /* Get the first group for ops::create_table */
        tcegrp = list_first_entry(&container->group_list,
                        struct tce_iommu_group, next);
        table_group = iommu_group_get_iommudata(tcegrp->grp);
        if (!table_group)
                return -EFAULT;

        if (!(table_group->pgsizes & (1ULL << page_shift)))
                return -EINVAL;

        if (!table_group->ops->set_window || !table_group->ops->unset_window ||
                        !table_group->ops->get_table_size ||
                        !table_group->ops->create_table)
                return -EPERM;

        /* Create TCE table */
        ret = tce_iommu_create_table(container, table_group, num,
                        page_shift, window_size, levels, &tbl);
        if (ret)
                return ret;

        BUG_ON(!tbl->it_ops->free);

        /*
         * Program the table to every group.
         * Groups have been tested for compatibility at attach time.
         */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                ret = table_group->ops->set_window(table_group, num, tbl);
                if (ret)
                        goto unset_exit;
        }

        container->tables[num] = tbl;

        /* Return start address assigned by platform in create_table() */
        *start_addr = tbl->it_offset << tbl->it_page_shift;

        return 0;

unset_exit:
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);
                table_group->ops->unset_window(table_group, num);
        }
        tce_iommu_free_table(tbl);

        return ret;
}

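/*
 * Removes the DMA window starting at @start_addr (VFIO_IOMMU_SPAPR_TCE_REMOVE):
 * unprograms it from every attached group, then clears and frees the table.
 */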
static long tce_iommu_remove_window(struct tce_container *container,
                __u64 start_addr)
{
        struct iommu_table_group *table_group = NULL;
        struct iommu_table *tbl;
        struct tce_iommu_group *tcegrp;
        int num;

        num = tce_iommu_find_table(container, start_addr, &tbl);
        if (num < 0)
                return -EINVAL;

        BUG_ON(!tbl->it_size);

        /* Detach groups from IOMMUs */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                /*
                 * SPAPR TCE IOMMU exposes the default DMA window to
                 * the guest via dma32_window_start/size of
                 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
                 * the userspace to remove this window, some do not, so
                 * here we check for the platform capability.
                 */
                if (!table_group->ops || !table_group->ops->unset_window)
                        return -EPERM;

                table_group->ops->unset_window(table_group, num);
        }

        /* Free table */
        tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
        tce_iommu_free_table(tbl);
        container->tables[num] = NULL;

        return 0;
}

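/*
 * The driver's .ioctl callback implementing both the v1 and v2 SPAPR TCE
 * userspace APIs.
 */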
static long tce_iommu_ioctl(void *iommu_data,
                                 unsigned int cmd, unsigned long arg)
{
        struct tce_container *container = iommu_data;
        unsigned long minsz, ddwsz;
        long ret;

        switch (cmd) {
        case VFIO_CHECK_EXTENSION:
                switch (arg) {
                case VFIO_SPAPR_TCE_IOMMU:
                case VFIO_SPAPR_TCE_v2_IOMMU:
                        ret = 1;
                        break;
                default:
                        ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
                        break;
                }

                return (ret < 0) ? 0 : ret;

        case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
                struct vfio_iommu_spapr_tce_info info;
                struct tce_iommu_group *tcegrp;
                struct iommu_table_group *table_group;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                tcegrp = list_first_entry(&container->group_list,
                                struct tce_iommu_group, next);
                table_group = iommu_group_get_iommudata(tcegrp->grp);

                if (!table_group)
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
                                dma32_window_size);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.dma32_window_start = table_group->tce32_start;
                info.dma32_window_size = table_group->tce32_size;
                info.flags = 0;
                memset(&info.ddw, 0, sizeof(info.ddw));

                if (table_group->max_dynamic_windows_supported &&
                                container->v2) {
                        info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
                        info.ddw.pgsizes = table_group->pgsizes;
                        info.ddw.max_dynamic_windows_supported =
                                table_group->max_dynamic_windows_supported;
                        info.ddw.levels = table_group->max_levels;
                }

                ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

                if (info.argsz >= ddwsz)
                        minsz = ddwsz;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_IOMMU_MAP_DMA: {
                struct vfio_iommu_type1_dma_map param;
                struct iommu_table *tbl = NULL;
                long num;
                enum dma_data_direction direction;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
                                VFIO_DMA_MAP_FLAG_WRITE))
                        return -EINVAL;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
                                (param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
                        return -EINVAL;

                /* iova is checked by the IOMMU API */
                if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_BIDIRECTIONAL;
                        else
                                direction = DMA_TO_DEVICE;
                } else {
                        if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
                                direction = DMA_FROM_DEVICE;
                        else
                                return -EINVAL;
                }

                ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
                if (ret)
                        return ret;

                if (container->v2)
                        ret = tce_iommu_build_v2(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);
                else
                        ret = tce_iommu_build(container, tbl,
                                        param.iova >> tbl->it_page_shift,
                                        param.vaddr,
                                        param.size >> tbl->it_page_shift,
                                        direction);

                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_UNMAP_DMA: {
                struct vfio_iommu_type1_dma_unmap param;
                struct iommu_table *tbl = NULL;
                long num;

                if (!container->enabled)
                        return -EPERM;

                minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                num = tce_iommu_find_table(container, param.iova, &tbl);
                if (num < 0)
                        return -ENXIO;

                if (param.size & ~IOMMU_PAGE_MASK(tbl))
                        return -EINVAL;

                ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
                                param.size >> tbl->it_page_shift);
                if (ret)
                        return ret;

                ret = tce_iommu_clear(container, tbl,
                                param.iova >> tbl->it_page_shift,
                                param.size >> tbl->it_page_shift);
                iommu_flush_tce(tbl);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_register_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
                struct vfio_iommu_spapr_register_memory param;

                if (!container->v2)
                        break;

                minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
                                size);

                if (copy_from_user(&param, (void __user *)arg, minsz))
                        return -EFAULT;

                if (param.argsz < minsz)
                        return -EINVAL;

                /* No flag is supported now */
                if (param.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);
                ret = tce_iommu_unregister_pages(container, param.vaddr,
                                param.size);
                mutex_unlock(&container->lock);

                return ret;
        }
        case VFIO_IOMMU_ENABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                ret = tce_iommu_enable(container);
                mutex_unlock(&container->lock);
                return ret;

        case VFIO_IOMMU_DISABLE:
                if (container->v2)
                        break;

                mutex_lock(&container->lock);
                tce_iommu_disable(container);
                mutex_unlock(&container->lock);
                return 0;

        case VFIO_EEH_PE_OP: {
                struct tce_iommu_group *tcegrp;

                ret = 0;
                list_for_each_entry(tcegrp, &container->group_list, next) {
                        ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
                                        cmd, arg);
                        if (ret)
                                return ret;
                }
                return ret;
        }

        case VFIO_IOMMU_SPAPR_TCE_CREATE: {
                struct vfio_iommu_spapr_tce_create create;

                if (!container->v2)
                        break;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
                                start_addr);

                if (copy_from_user(&create, (void __user *)arg, minsz))
                        return -EFAULT;

                if (create.argsz < minsz)
                        return -EINVAL;

                if (create.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);

                ret = tce_iommu_create_window(container, create.page_shift,
                                create.window_size, create.levels,
                                &create.start_addr);

                mutex_unlock(&container->lock);

                if (!ret && copy_to_user((void __user *)arg, &create, minsz))
                        ret = -EFAULT;

                return ret;
        }
        case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
                struct vfio_iommu_spapr_tce_remove remove;

                if (!container->v2)
                        break;

                if (!tce_groups_attached(container))
                        return -ENXIO;

                minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
                                start_addr);

                if (copy_from_user(&remove, (void __user *)arg, minsz))
                        return -EFAULT;

                if (remove.argsz < minsz)
                        return -EINVAL;

                if (remove.flags)
                        return -EINVAL;

                mutex_lock(&container->lock);

                ret = tce_iommu_remove_window(container, remove.start_addr);

                mutex_unlock(&container->lock);

                return ret;
        }
        }

        return -ENOTTY;
}

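/*
 * Undoes tce_iommu_take_ownership(): clears whatever was mapped in the
 * default tables and returns them to the kernel.
 */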
static void tce_iommu_release_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = container->tables[i];

                if (!tbl)
                        continue;

                tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
                tce_iommu_userspace_view_free(tbl);
                if (tbl->it_map)
                        iommu_release_ownership(tbl);

                container->tables[i] = NULL;
        }
}

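/*
 * Takes the default DMA windows of a group which does not implement dynamic
 * DMA windows away from the kernel so userspace can program them through
 * this container.
 */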
static int tce_iommu_take_ownership(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        int i, j, rc = 0;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbl = table_group->tables[i];

                if (!tbl || !tbl->it_map)
                        continue;

                rc = tce_iommu_userspace_view_alloc(tbl);
                if (!rc)
                        rc = iommu_take_ownership(tbl);

                if (rc) {
                        for (j = 0; j < i; ++j)
                                iommu_release_ownership(
                                                table_group->tables[j]);

                        return rc;
                }
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                container->tables[i] = table_group->tables[i];

        return 0;
}

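/*
 * DDW-capable counterpart of tce_iommu_release_ownership(): unprograms all
 * windows from the group and hands ownership back to the platform.
 */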
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i;

        if (!table_group->ops->unset_window) {
                WARN_ON_ONCE(1);
                return;
        }

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);
}

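/*
 * DDW-capable variant of taking ownership: asks the platform to give up
 * control of the group's tables, creates a default 32-bit window if this
 * is the first group in the container (userspace expects one to exist),
 * and programs all of the container's windows into the new group.
 */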
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
                struct iommu_table_group *table_group)
{
        long i, ret = 0;
        struct iommu_table *tbl = NULL;

        if (!table_group->ops->create_table || !table_group->ops->set_window ||
                        !table_group->ops->release_ownership) {
                WARN_ON_ONCE(1);
                return -EFAULT;
        }

        table_group->ops->take_ownership(table_group);

        /*
         * If this is the first group attached, check if there is
         * a default DMA window and create one if there is none,
         * as userspace expects it to exist.
         */
        if (!tce_groups_attached(container) && !container->tables[0]) {
                ret = tce_iommu_create_table(container,
                                table_group,
                                0, /* window number */
                                IOMMU_PAGE_SHIFT_4K,
                                table_group->tce32_size,
                                1, /* default levels */
                                &tbl);
                if (ret)
                        goto release_exit;
                else
                        container->tables[0] = tbl;
        }

        /* Set all windows to the new group */
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                tbl = container->tables[i];

                if (!tbl)
                        continue;

                /* Set the default window to a new group */
                ret = table_group->ops->set_window(table_group, i, tbl);
                if (ret)
                        goto release_exit;
        }

        return 0;

release_exit:
        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
                table_group->ops->unset_window(table_group, i);

        table_group->ops->release_ownership(table_group);

        return ret;
}

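/*
 * The driver's .attach_group callback: verifies the new group is compatible
 * with the groups already attached and takes ownership of its tables,
 * either directly or via the dynamic DMA window ops.
 */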
static int tce_iommu_attach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        int ret;
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        struct tce_iommu_group *tcegrp = NULL;

        mutex_lock(&container->lock);

        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);

        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership)) {
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* Check if the new group has the same iommu_ops (i.e. is compatible) */
        list_for_each_entry(tcegrp, &container->group_list, next) {
                struct iommu_table_group *table_group_tmp;

                if (tcegrp->grp == iommu_group) {
                        pr_warn("tce_vfio: Group %d is already attached\n",
                                        iommu_group_id(iommu_group));
                        ret = -EBUSY;
                        goto unlock_exit;
                }
                table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
                if (table_group_tmp->ops->create_table !=
                                table_group->ops->create_table) {
                        pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
                                        iommu_group_id(iommu_group),
                                        iommu_group_id(tcegrp->grp));
                        ret = -EPERM;
                        goto unlock_exit;
                }
        }

        tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
        if (!tcegrp) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        if (!table_group->ops || !table_group->ops->take_ownership ||
                        !table_group->ops->release_ownership)
                ret = tce_iommu_take_ownership(container, table_group);
        else
                ret = tce_iommu_take_ownership_ddw(container, table_group);

        if (!ret) {
                tcegrp->grp = iommu_group;
                list_add(&tcegrp->next, &container->group_list);
        }

unlock_exit:
        if (ret && tcegrp)
                kfree(tcegrp);

        mutex_unlock(&container->lock);

        return ret;
}

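/*
 * The driver's .detach_group callback: removes the group from the container
 * and returns ownership of its tables to the platform.
 */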
static void tce_iommu_detach_group(void *iommu_data,
                struct iommu_group *iommu_group)
{
        struct tce_container *container = iommu_data;
        struct iommu_table_group *table_group;
        bool found = false;
        struct tce_iommu_group *tcegrp;

        mutex_lock(&container->lock);

        list_for_each_entry(tcegrp, &container->group_list, next) {
                if (tcegrp->grp == iommu_group) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_warn("tce_vfio: detaching unattached group #%u\n",
                                iommu_group_id(iommu_group));
                goto unlock_exit;
        }

        list_del(&tcegrp->next);
        kfree(tcegrp);

        table_group = iommu_group_get_iommudata(iommu_group);
        BUG_ON(!table_group);

        if (!table_group->ops || !table_group->ops->release_ownership)
                tce_iommu_release_ownership(container, table_group);
        else
                tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
        mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
        .name           = "iommu-vfio-powerpc",
        .owner          = THIS_MODULE,
        .open           = tce_iommu_open,
        .release        = tce_iommu_release,
        .ioctl          = tce_iommu_ioctl,
        .attach_group   = tce_iommu_attach_group,
        .detach_group   = tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
        return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
        vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);