// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>
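
/*
 * A counterpart of WARN_ON_ONCE() usable in real mode: it evaluates to the
 * condition value and, when CONFIG_BUG is set, prints the failed condition
 * and its location once via pr_err() instead of the full WARN() machinery.
 */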
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
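/*
 * Converts a guest physical address (a TCE value with the permission bits
 * masked off) to a host userspace address via the memslots, and optionally
 * returns a pointer to the rmap entry of the backing guest page.
 */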
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

	return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison the TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/* Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is arithmetic
 * only and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but then either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	/*
	 * page must not be NULL in real mode,
	 * kvmppc_rm_ioba_validate() must have taken care of this.
	 */
	WARN_ON_ONCE_RM(!page);
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages are allocated on demand, which kvmppc_rm_tce_put() will not
 * be able to do in real mode.
 * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCE page is
 * allocated or not required (when clearing a tce entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
	/*
	 * clearing==true says kvmppc_rm_tce_put won't be allocating pages
	 * for empty tces.
	 */
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}
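
/*
 * Real mode wrapper around the iommu_table xchg_no_kill() callback:
 * exchanges the hardware TCE at @entry with *@hpa/*@direction and, if the
 * old entry allowed the device to write to memory, marks the cached
 * userspace address of that entry dirty.
 */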
static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}
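
/*
 * Flushes the hardware TCE cache for the just updated entries if the
 * platform requires an explicit kill; a no-op otherwise.
 */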
extern void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}
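
/*
 * Clears a single hardware TCE on an error path: exchanges it with an
 * empty (DMA_NONE) entry and ignores whatever was there before.
 */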
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
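
/*
 * Looks up the preregistered memory region via the cached userspace
 * address of the entry, drops the reference taken by mm_iommu_mapped_inc()
 * at mapping time and invalidates the cache.
 */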
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}
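
/*
 * Unmaps one IOMMU page: exchanges the TCE with an empty entry and, if
 * something was mapped there, drops the preregistered memory reference.
 * Returns H_TOO_HARD when the operation should be retried in virtual mode.
 */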
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}
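
/*
 * Unmaps all hardware (IOMMU) pages backing one guest TCE table entry:
 * the guest page size can be bigger than the hardware IOMMU page size,
 * so one guest entry may correspond to multiple hardware entries.
 */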
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
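
/*
 * Maps one IOMMU page at @entry to the host page backing the userspace
 * address @ua. The memory must have been preregistered with the
 * mm_iommu_xxx API so the UA->HPA translation needs no page table walk.
 */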
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}
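
/*
 * Maps all hardware (IOMMU) pages backing one guest TCE table entry,
 * advancing the userspace address by one IOMMU page per iteration.
 */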
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
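
/*
 * Real mode handler for the H_PUT_TCE hcall: validates the request,
 * updates the hardware table of every IOMMU group attached to the LIOBN
 * and then stores the TCE value for user space to pick up.
 */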
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
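
/*
 * Translates a host userspace address to a host physical address by
 * walking the task's page table in real mode; anything other than a
 * present, young, base-page-size mapping is punted back to virtual mode.
 */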
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}
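
/*
 * Real mode handler for the H_PUT_TCE_INDIRECT hcall: reads a list of up
 * to 512 TCEs from a 4K-aligned page of guest memory and applies each of
 * them the way H_PUT_TCE would, returning H_TOO_HARD whenever the list
 * cannot be safely accessed in real mode.
 */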
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs
	 * so the whole table addressed resides in a 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does
		 * not depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only
		 * when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte().
		 */
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_TOO_HARD;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto invalidate_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
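
/*
 * Real mode handler for the H_STUFF_TCE hcall: sets @npages consecutive
 * entries to the same TCE value, which must carry no permission bits.
 */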
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */