/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}
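
/*
 * Worked example of the accounting above (illustrative numbers): with 4K
 * system pages and RLIMIT_MEMLOCK set to 64MB, lock_limit comes to
 * 64MB >> 12 = 16384 pages.  Locking a 32MB TCE window then consumes
 * 32MB >> 12 = 8192 of them:
 *
 *	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; // 16384
 *	locked = current->mm->locked_vm + 8192;
 *	// try_increment_locked_vm() fails with -ENOMEM once
 *	// locked > lock_limit, unless the task has CAP_IPC_LOCK
 */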

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container is created before it is supplied with IOMMU groups, as
 * required by the VFIO API; groups are attached later and tracked in
 * group_list below.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};
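
/*
 * Typical userspace bring-up that reaches this driver (a sketch; the
 * group number 26 and the omitted error handling are hypothetical):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	// tce_iommu_open() and tce_iommu_attach_group() below have
 *	// now run for this container
 */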

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(current->mm, mem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}

static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
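
/*
 * Example: with 64K TCE pages (page_shift == 16) on a 4K-page kernel,
 * an ordinary page fails the check (12 + 0 < 16), while a 16MB huge
 * page (compound order 12) passes (12 + 12 >= 16).
 */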

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately, at the moment this counts whole tables, no matter
	 * how much memory the guest has.  I.e. for a 4GB guest and 4 IOMMU
	 * groups, each with a 2GB DMA window, 8GB will be counted here.  The
	 * reason for this is that we cannot tell here the amount of RAM used
	 * by the guest as this information is only available from KVM and
	 * VFIO is KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
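
/*
 * Userspace view of the v1 flow guarded by this accounting (a sketch;
 * assumes a container fd set up as shown earlier):
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	// RLIMIT_MEMLOCK must cover info.dma32_window_size, or
 *	// VFIO_IOMMU_ENABLE fails with -ENOMEM
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 */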

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(current->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
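
/*
 * Sketch of the v2 userspace flow feeding this path (illustrative; buf
 * and bufsize are hypothetical, both page aligned): memory must be
 * preregistered before VFIO_IOMMU_MAP_DMA, otherwise the
 * mm_iommu_lookup() above fails and the map returns -EINVAL:
 *
 *	struct vfio_iommu_spapr_register_memory reg = {
 *		.argsz = sizeof(reg),
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.size = bufsize,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
 *	// VFIO_IOMMU_MAP_DMA within [vaddr, vaddr + size) now resolves
 *	// through the preregistered region
 */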

static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (!ret && container->v2) {
		ret = tce_iommu_userspace_view_alloc(*ptbl);
		if (ret)
			(*ptbl)->it_ops->free(*ptbl);
	}

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}

static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(tbl);

	return ret;
}
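
/*
 * Userspace view of window creation (a sketch; sizes are illustrative).
 * The platform chooses the bus address and returns it in start_addr:
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,		// 64K TCE pages
 *		.window_size = 1ULL << 30,	// 1GB window
 *		.levels = 1,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
 *	// create.start_addr now holds the new window's bus offset
 */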

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(tbl);
	container->tables[num] = NULL;

	return 0;
}
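
/*
 * The matching removal from userspace (a sketch, reusing create from
 * the previous example):
 *
 *	struct vfio_iommu_spapr_tce_remove remove = {
 *		.argsz = sizeof(remove),
 *		.start_addr = create.start_addr,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */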

static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}

	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}

	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}

	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}

	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_window(container, create.page_shift,
				create.window_size, create.levels,
				&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
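
/*
 * End-to-end map/unmap against the handlers above (a sketch; buf and
 * bufsize are hypothetical and must be multiples of the IOMMU page
 * size):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova = 0,
 *		.size = bufsize,
 *	};
 *	struct vfio_iommu_type1_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova = 0,
 *		.size = bufsize,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *	// ... device DMA ...
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 */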

static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = tce_iommu_userspace_view_alloc(tbl);
		if (!rc)
			rc = iommu_take_ownership(tbl);

		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/*
	 * If this is the first group attached, check if there is
	 * a default DMA window and create one if none exists, as
	 * the userspace expects it to exist.
	 */
	if (!tce_groups_attached(container) && !container->tables[0]) {
		ret = tce_iommu_create_table(container,
				table_group,
				0, /* window number */
				IOMMU_PAGE_SHIFT_4K,
				table_group->tce32_size,
				1, /* default levels */
				&tbl);
		if (ret)
			goto release_exit;
		else
			container->tables[0] = tbl;
	}

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		tbl = container->tables[i];

		if (!tbl)
			continue;

		/* Set the default window to a new group */
		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}

static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if the new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);