// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/rcupdate_wait.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
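
/*
 * Find the guest TCE table registered for @liobn. The list is walked
 * locklessly, so the caller is expected to hold an RCU/SRCU read-side
 * section around the lookup.
 */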
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
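
/* Number of system pages needed to hold @iommu_pages TCEs (one u64 each). */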
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
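
/*
 * Pages to account as locked memory for a table: the TCE pages plus the
 * descriptor with its pages[] array.
 */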
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
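
/* RCU callback: drop the hardware table reference and free the descriptor. */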
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);
        kfree(stit);
}
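
/*
 * kref release: unlink the hardware table from the LIOBN and free it
 * after an RCU grace period.
 */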
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
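
/*
 * Drop the references taken on every TCE table of this VM when the
 * IOMMU group @grp was attached.
 */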
void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;
        long i;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                        }
                }
        }
}
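
/*
 * Attach the IOMMU group @grp to the TCE table referred to by @tablefd:
 * pick a hardware iommu_table with a compatible page size, offset and
 * size, and link it to the guest table so the H_PUT_TCE family updates
 * it as well.
 */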
long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        struct kvmppc_spapr_tce_iommu_table *stit;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                    (tbltmp->it_offset << tbltmp->it_page_shift ==
                     stt->offset << stt->page_shift) &&
                    (tbltmp->it_size << tbltmp->it_page_shift >=
                     stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);

                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);

                iommu_tce_table_put(tbl);

        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);
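
/* RCU callback: free the backing pages and the table descriptor. */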
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
}
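
/*
 * Return the backing page for TCE page index @sttpage, allocating it
 * on first use under stt->alloc_lock.
 */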
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
                unsigned long sttpage)
{
        struct page *page = stt->pages[sttpage];

        if (page)
                return page;

        mutex_lock(&stt->alloc_lock);
        page = stt->pages[sttpage];
        if (!page) {
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (page)
                        stt->pages[sttpage] = page;
        }
        mutex_unlock(&stt->alloc_lock);

        return page;
}
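
/* mmap fault handler: back the faulting offset with the lazily allocated TCE page. */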
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
        if (!page)
                return VM_FAULT_OOM;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}
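
/*
 * File release: unlink the table from the VM, drop the hardware table
 * references, unaccount the locked memory and free the table via RCU.
 */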
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        account_locked_vm(kvm->mm,
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

        kvm_put_kvm(stt->kvm);

        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap = kvm_spapr_tce_mmap,
        .release = kvm_spapr_tce_release,
};
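
/*
 * Handle KVM_CREATE_SPAPR_TCE_64: validate the window parameters, account
 * the locked memory, allocate the table and expose it via an anonymous fd.
 */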
int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        struct mm_struct *mm = kvm->mm;
        unsigned long npages;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                        (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))

        npages = kvmppc_tce_pages(args->size);
        ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);

        stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = args->size;

        mutex_init(&stt->alloc_lock);
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {

        ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                        stt, O_RDWR | O_CLOEXEC);

        list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);

        kvm_put_kvm_no_destroy(kvm);

        mutex_unlock(&kvm->lock);

        account_locked_vm(mm, kvmppc_stt_pages(npages), false);
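
/*
 * Translate the guest physical address carried in @tce to a userspace
 * address via the memslots, preserving the offset within the page.
 */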
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
        if (!memslot)
                return H_TOO_HARD;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        return H_SUCCESS;
}
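
/*
 * Validate a guest TCE before it is stored: check the GPA against the
 * table page size, make sure it translates to a userspace address, and
 * that every attached hardware table has preregistered memory for it.
 */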
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */

        if (iommu_tce_check_gpa(stt->page_shift, gpa))

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        unsigned long sttpage;

        sttpage = idx / TCES_PER_PAGE;
        page = stt->pages[sttpage];

                /* We allow any TCE, not just with read|write permissions */

                page = kvm_spapr_get_tce_page(stt, sttpage);

        tbl = page_to_virt(page);

        tbl[idx % TCES_PER_PAGE] = tce;
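
/* Clear the hardware TCEs covering one guest entry, subpage by subpage. */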
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
                struct iommu_table *tbl, unsigned long entry)
{
        unsigned long i;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

        for (i = 0; i < subpages; ++i) {
                unsigned long hpa = 0;
                enum dma_data_direction dir = DMA_NONE;

                iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
        }
}
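
/*
 * Drop the "mapped" reference on the preregistered memory backing @entry
 * and clear the cached userspace address.
 */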
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);
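
/* Unmap a single hardware TCE and release its mapped reference. */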
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;

        if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
                        &dir)))

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
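
/* Unmap all hardware (IOMMU page sized) entries backing one guest TCE. */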
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}
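
/*
 * Map one hardware TCE: look up the preregistered memory for @ua, take a
 * mapped reference and program the entry into the hardware table.
 */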
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        /* it_userspace allocation might be delayed */

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);

        /* This only handles v2 IOMMU type, v1 is handled via ioctl() */

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))

        if (mm_iommu_mapped_inc(mem))

        ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);

        kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);
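
/* Map all hardware (IOMMU page sized) entries backing one guest TCE. */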
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        iommu_tce_kill(tbl, io_entry, subpages);

        return ret;
}
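
/* H_PUT_TCE: set a single TCE entry in the table identified by @liobn. */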
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)

        dir = iommu_tce_direction(tce);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {

                ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                stit->tbl, entry);

                ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);

        kvmppc_tce_put(stt, entry, tce);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
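
/*
 * H_PUT_TCE_INDIRECT: set @npages consecutive TCE entries from a
 * guest-provided list at @tce_list.
 */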
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);

        entry = ioba >> stt->page_shift;
        /*
         * SPAPR spec says that the maximum size of the list is 512 TCEs
         * so the whole table fits in a 4K page
         */

        if (tce_list & (SZ_4K - 1))

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {

        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {

                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * the time we read it again.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code. If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {

                tce = be64_to_cpu(tce);

                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
                                                entry + i);

                kvmppc_tce_put(stt, entry + i, tce);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
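
/* H_STUFF_TCE: set @npages consecutive TCE entries to @tce_value. */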
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)

        /* Check permission bits only to allow userspace to poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)

                        if (ret == H_TOO_HARD)

                        kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
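
/* H_GET_TCE: return the current TCE value for @ioba in GPR4. */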
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;

        stt = kvmppc_find_table(vcpu->kvm, liobn);

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];

                kvmppc_set_gpr(vcpu, 4, 0);

        tbl = (u64 *)page_address(page);

        kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);

EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);