/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
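/*
 * With the PMD level folded, at most a leaf PTE table needs allocating
 * below the PGD; otherwise both a PMD table and a PTE table may be
 * needed, hence one vs. two cached pages.
 */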
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(); however, it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
        unsigned long *p, *end;

#ifdef __PAGETABLE_PMD_FOLDED
        entry = (unsigned long)invalid_pte_table;
#else
        entry = (unsigned long)invalid_pmd_table;
#endif

        p = (unsigned long *)page;
        end = p + PTRS_PER_PGD;
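        /*
         * The fill that follows (elided here) points every slot in [p, end)
         * at the invalid lower-level table selected above; a minimal sketch,
         * assuming no loop unrolling:
         *
         *	do {
         *		*p++ = entry;
         *	} while (p != end);
         */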
/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical to host physical page mappings.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
        ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @addr:	Address to index the page tables with.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
        pgd += pgd_index(addr);
        if (pgd_none(*pgd)) {
                /* Not used on MIPS yet */

        p4d = p4d_offset(pgd, addr);
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                new_pmd = kvm_mmu_memory_cache_alloc(cache);
                pmd_init((unsigned long)new_pmd,
                         (unsigned long)invalid_pte_table);
                pud_populate(NULL, pud, new_pmd);

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                new_pte = kvm_mmu_memory_cache_alloc(cache);
                pmd_populate_kernel(NULL, pmd, new_pte);

        return pte_offset_kernel(pmd, addr);
/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache,
        return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
                                   unsigned long end_gpa)
        int i_min = pte_index(start_gpa);
        int i_max = pte_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))

                set_pte(pte + i, __pte(0));

        return safe_to_remove;
static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
                                   unsigned long end_gpa)
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gpa);
        int i_max = pmd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pmd_present(pmd[i]))

                pte = pte_offset_kernel(pmd + i, 0);
                if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
                        pte_free_kernel(NULL, pte);
                        safe_to_remove = false;

        return safe_to_remove;
static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
                                   unsigned long end_gpa)
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gpa);
        int i_max = pud_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pud_present(pud[i]))

                pmd = pmd_offset(pud + i, 0);
                if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
                        safe_to_remove = false;

        return safe_to_remove;
static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
                                   unsigned long end_gpa)
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gpa);
        int i_max = pgd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pgd_present(pgd[i]))

                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
                        safe_to_remove = false;

        return safe_to_remove;
/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether it is safe to remove the top level page directory
 *		because all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
        return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
                                      start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT);
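/*
 * BUILD_PTE_RANGE_OP(name, op) below instantiates kvm_mips_<name>_pte(),
 * _pmd(), _pud() and _pgd() helpers that walk a GPA range, apply op() to
 * each present PTE, and return whether any PTE was actually changed.
 */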
#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
        int i_min = pte_index(start); \
        int i_max = pte_index(end); \
        for (i = i_min; i <= i_max; ++i) { \
                if (!pte_present(pte[i])) \
                if (pte_val(new) == pte_val(old)) \
                set_pte(pte + i, new); \
/* returns true if anything was done */ \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
        unsigned long cur_end = ~0ul; \
        int i_min = pmd_index(start); \
        int i_max = pmd_index(end); \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pmd_present(pmd[i])) \
                pte = pte_offset_kernel(pmd + i, 0); \
                ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
        unsigned long cur_end = ~0ul; \
        int i_min = pud_index(start); \
        int i_max = pud_index(end); \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pud_present(pud[i])) \
                pmd = pmd_offset(pud + i, 0); \
                ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
        unsigned long cur_end = ~0ul; \
        int i_min = pgd_index(start); \
        int i_max = pgd_index(end); \
        for (i = i_min; i <= i_max; ++i, start = 0) { \
                if (!pgd_present(pgd[i])) \
                p4d = p4d_offset(pgd, 0); \
                pud = pud_offset(p4d + i, 0); \
                ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
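/*
 * The instantiation above generates kvm_mips_mkclean_pgd(), which
 * kvm_mips_mkclean_gpa_pt() below uses to walk the guest physical range.
 */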
/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:	Whether any GPA mappings were modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		flushed.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
        return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
                                    start_gfn << PAGE_SHIFT,
                                    end_gfn << PAGE_SHIFT);
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                gfn_t gfn_offset, unsigned long mask)
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask);

        kvm_mips_mkclean_gpa_pt(kvm, start, end);
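        /*
         * For example, gfn_offset 0 with mask 0b0110 yields
         * start = base_gfn + 1 and end = base_gfn + 2, so the call above
         * cleans (write protects) that whole contiguous range of guest
         * frames.
         */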
/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)
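/*
 * Generates kvm_mips_mkold_pgd(), used by kvm_mips_mkold_gpa_pt() below to
 * clear the accessed (young) bit over a guest physical range.
 */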
static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
        return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
                                  start_gfn << PAGE_SHIFT,
                                  end_gfn << PAGE_SHIFT);
static int handle_hva_to_gpa(struct kvm *kvm,
                             int (*handler)(struct kvm *kvm, gfn_t gfn,
                                            struct kvm_memory_slot *memslot,
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                ret |= handler(kvm, gfn, gfn_end, memslot, data);
static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                 struct kvm_memory_slot *memslot, void *data)
        kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

        kvm_mips_callbacks->prepare_flush_shadow(kvm);
        kvm_flush_remote_tlbs(kvm);
static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                struct kvm_memory_slot *memslot, void *data)
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t hva_pte = *(pte_t *)data;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

        /* Mapping may need adjusting depending on memslot flags */
        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
                hva_pte = pte_mkclean(hva_pte);
        else if (memslot->flags & KVM_MEM_READONLY)
                hva_pte = pte_wrprotect(hva_pte);

        set_pte(gpa_pte, hva_pte);

        /* Replacing an absent or old page doesn't need flushes */
        if (!pte_present(old_pte) || !pte_young(old_pte))

        /* Pages swapped, aged, moved, or cleaned require flushes */
        return !pte_present(hva_pte) ||
               !pte_young(hva_pte) ||
               pte_pfn(old_pte) != pte_pfn(hva_pte) ||
               (pte_dirty(old_pte) && !pte_dirty(hva_pte));
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
        unsigned long end = hva + PAGE_SIZE;

        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);

                kvm_mips_callbacks->prepare_flush_shadow(kvm);
                kvm_flush_remote_tlbs(kvm);
static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                               struct kvm_memory_slot *memslot, void *data)
        return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                    struct kvm_memory_slot *memslot, void *data)
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

        return pte_young(*gpa_pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to absent GPA mapping or write to
 *		read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
                                   pte_t *out_entry, pte_t *out_buddy)
        struct kvm *kvm = vcpu->kvm;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
        bool pfn_valid = false;

        spin_lock(&kvm->mmu_lock);

        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        if (!ptep || !pte_present(*ptep)) {

        /* Track access to pages marked old */
        if (!pte_young(*ptep)) {
                set_pte(ptep, pte_mkyoung(*ptep));
                pfn = pte_pfn(*ptep);
                /* call kvm_set_pfn_accessed() after unlock */

        if (write_fault && !pte_dirty(*ptep)) {
                if (!pte_write(*ptep)) {

                /* Track dirtying of writeable pages */
                set_pte(ptep, pte_mkdirty(*ptep));
                pfn = pte_pfn(*ptep);
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);

                *out_buddy = *ptep_buddy(ptep);

        spin_unlock(&kvm->mmu_lock);

                kvm_set_pfn_accessed(pfn);
/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		VCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:	0 on success, in which case the caller may use the @out_entry
 *		and @out_buddy PTEs to update derived mappings and resume guest
 *		execution.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually handled
 *		as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                             pte_t *out_entry, pte_t *out_buddy)
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        pte_t *ptep, entry, old_pte;
        unsigned long prot_bits;
        unsigned long mmu_seq;

        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,

        /* We need a minimum of cached pages ready for page table creation */
        err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
        /*
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot() below.
         */
        mmu_seq = kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
         * mmu_notifier_seq is incremented.
         */
        smp_rmb();
        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
        if (is_error_noslot_pfn(pfn)) {

        spin_lock(&kvm->mmu_lock);
        /* Check if an invalidation has taken place since we got pfn */
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
                 * gfn_to_pfn_prot().
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
        /* Ensure page tables are allocated */
        ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

        prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
                prot_bits |= _PAGE_WRITE;
                        prot_bits |= __WRITEABLE;
                        mark_page_dirty(kvm, gfn);
                        kvm_set_pfn_dirty(pfn);

        entry = pfn_pte(pfn, __pgprot(prot_bits));

        set_pte(ptep, entry);

                *out_buddy = *ptep_buddy(ptep);

        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        kvm_set_pfn_accessed(pfn);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        /* We need a minimum of cached pages ready for page table creation */
        ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                pgdp = vcpu->arch.guest_kernel_mm.pgd;

                pgdp = vcpu->arch.guest_user_mm.pgd;

        return kvm_mips_walk_pgd(pgdp, memcache, addr);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
        addr &= PAGE_MASK << 1;

        pgdp = vcpu->arch.guest_kernel_mm.pgd;
        ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
                ptep[0] = pfn_pte(0, __pgprot(0));
                ptep[1] = pfn_pte(0, __pgprot(0));

                pgdp = vcpu->arch.guest_user_mm.pgd;
                ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
                        ptep[0] = pfn_pte(0, __pgprot(0));
                        ptep[1] = pfn_pte(0, __pgprot(0));
/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
                                   unsigned long end_gva)
        int i_min = pte_index(start_gva);
        int i_max = pte_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);

        /*
         * There's no freeing to do, so there's no point clearing individual
         * entries unless only part of the last level page table needs flushing.
         */
        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))

                set_pte(pte + i, __pte(0));
static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
                                   unsigned long end_gva)
        unsigned long end = ~0ul;
        int i_min = pmd_index(start_gva);
        int i_max = pmd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pmd_present(pmd[i]))

                pte = pte_offset_kernel(pmd + i, 0);
                if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
                        pte_free_kernel(NULL, pte);
                        safe_to_remove = false;

        return safe_to_remove;
static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
                                   unsigned long end_gva)
        unsigned long end = ~0ul;
        int i_min = pud_index(start_gva);
        int i_max = pud_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pud_present(pud[i]))

                pmd = pmd_offset(pud + i, 0);
                if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
                        safe_to_remove = false;

        return safe_to_remove;
static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
                                   unsigned long end_gva)
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gva);
        int i_max = pgd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pgd_present(pgd[i]))

                p4d = p4d_offset(pgd, 0);
                pud = pud_offset(p4d + i, 0);
                if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
                        safe_to_remove = false;

        return safe_to_remove;
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
        if (flags & KMF_GPA) {
                /* all of guest virtual address space could be affected */
                if (flags & KMF_KERN)
                        /* useg, kseg0, seg2/3 */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);

                        kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

                kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

                if (flags & KMF_KERN)
                        kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
        /*
         * Don't leak writeable but clean entries from GPA page tables. We don't
         * want the normal Linux tlbmod handler to handle dirtying when KVM
         * accesses guest memory.
         */
                pte = pte_wrprotect(pte);
static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
        /* Guest EntryLo overrides host EntryLo */
        if (!(entrylo & ENTRYLO_D))
                pte = pte_mkclean(pte);

        return kvm_mips_gpa_pte_to_gva_unmapped(pte);
#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
                                      struct kvm_vcpu *vcpu,
        ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);

        /* Invalidate this entry in the TLB */
        return kvm_vz_host_tlb_inv(vcpu, badvaddr);
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu,
        pte_t pte_gpa[2], *ptep_gva;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();

        /* Get the GPA page table entry */
        gpa = KVM_GUEST_CPHYSADDR(badvaddr);
        idx = (badvaddr >> PAGE_SHIFT) & 1;
        if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],

        /* Get the GVA page table entry */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);

                kvm_err("No ptep for gva %lx\n", badvaddr);

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
        struct kvm *kvm = vcpu->kvm;
        pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
        unsigned int idx = TLB_LO_IDX(*tlb, gva);
        bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the guest
         * TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

        /* Get the GPA page table entry */
        if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
                              write_fault, &pte_gpa[idx], NULL) < 0)

        /* And its GVA buddy's GPA page table entry if it also exists */
        pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
        if (tlb_lo[!idx] & ENTRYLO_V) {
                spin_lock(&kvm->mmu_lock);
                ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
                                mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
                        pte_gpa[!idx] = *ptep_buddy;
                spin_unlock(&kvm->mmu_lock);

        /* Get the GVA page table entry pair */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);

                kvm_err("No ptep for gva %lx\n", gva);

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

        /* Invalidate this entry in the TLB, current guest mode ASID only */
        kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
        ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);

                kvm_err("No ptep for commpage %lx\n", badvaddr);

        pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
        /* Also set valid and dirty, so refill handler doesn't have to */
        prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED);
        *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot)));

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        unsigned long flags;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        local_irq_save(flags);

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_load(vcpu, cpu);

        local_irq_restore(flags);
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        unsigned long flags;

        local_irq_save(flags);

        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_put(vcpu, cpu);

        local_irq_restore(flags);
/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:	Virtual CPU.
 * @gva:	Guest virtual address to be accessed.
 * @write:	True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:	KVM_MIPS_MAPPED on success.
 *		KVM_MIPS_GVA if bad guest virtual address.
 *		KVM_MIPS_GPA if bad guest physical address.
 *		KVM_MIPS_TLB if guest TLB not present.
 *		KVM_MIPS_TLBINV if guest TLB present but not valid.
 *		KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb;

        if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
                        return KVM_MIPS_GPA;
        } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
                /* Address should be in the guest TLB */
                index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
                        return KVM_MIPS_TLB;
                tlb = &vcpu->arch.guest_tlb[index];

                /* Entry should be valid, and dirty for writes */
                if (!TLB_IS_VALID(*tlb, gva))
                        return KVM_MIPS_TLBINV;
                if (write && !TLB_IS_DIRTY(*tlb, gva))
                        return KVM_MIPS_TLBMOD;

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
                        return KVM_MIPS_GPA;

                return KVM_MIPS_GVA;

        return KVM_MIPS_MAPPED;
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
        if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
                 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))

        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = get_user(*out, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * Try to handle the fault, maybe we just raced with a GVA
                 * invalidation.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
                if (unlikely(err)) {
                        kvm_err("%s: illegal address: %p\n",

                /* Hopefully it'll work now */