Merge branch kvm-arm64/pkvm-vcpu-state into kvmarm-master/next
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 96193cb..318298e 100644
@@ -14,6 +14,7 @@
 #include <nvhe/early_alloc.h>
 #include <nvhe/gfp.h>
 #include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
 #include <nvhe/mm.h>
 #include <nvhe/spinlock.h>
 
@@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr;
 
 static u64 __io_map_base;
 
+struct hyp_fixmap_slot {
+       u64 addr;
+       kvm_pte_t *ptep;
+};
+static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);
+
 static int __pkvm_create_mappings(unsigned long start, unsigned long size,
                                  unsigned long phys, enum kvm_pgtable_prot prot)
 {
@@ -129,13 +136,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
        return ret;
 }
 
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+int hyp_back_vmemmap(phys_addr_t back)
 {
-       unsigned long start, end;
+       unsigned long i, start, size, end = 0;
+       int ret;
 
-       hyp_vmemmap_range(phys, size, &start, &end);
+       for (i = 0; i < hyp_memblock_nr; i++) {
+               start = hyp_memory[i].base;
+               start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
+       /*
+        * The beginning of the hyp_vmemmap region for the current
+        * memblock may already be backed by the page backing the end
+        * of the previous region, so avoid mapping it twice.
+        */
+               start = max(start, end);
+
+               end = hyp_memory[i].base + hyp_memory[i].size;
+               end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
+               if (start >= end)
+                       continue;
+
+               size = end - start;
+               ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
+               if (ret)
+                       return ret;
+
+               memset(hyp_phys_to_virt(back), 0, size);
+               back += size;
+       }
 
-       return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+       return 0;
 }
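
For context, a worked example of the arithmetic above (hypothetical numbers; it assumes 4KiB pages and a 4-byte struct hyp_page, neither of which is spelled out in this hunk):

	/*
	 * Two adjacent 1MiB memblocks:
	 *
	 *   memblock[0]: base = 0x80000000, size = 0x100000 (256 pages)
	 *     hyp_phys_to_page() span = 256 * 4 bytes = 1KiB, rounded out by
	 *     ALIGN_DOWN()/PAGE_ALIGN() to one full 4KiB backing page.
	 *
	 *   memblock[1]: base = 0x80100000, size = 0x100000
	 *     its 1KiB span starts inside the page already mapped above, so
	 *     "start = max(start, end)" pushes start up to the previous end,
	 *     start >= end holds, and the iteration is skipped rather than
	 *     backing the same vmemmap page twice.
	 */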
 
 static void *__hyp_bp_vect_base;
@@ -189,6 +219,102 @@ int hyp_map_vectors(void)
        return 0;
 }
 
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+       struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
+       kvm_pte_t pte, *ptep = slot->ptep;
+
+       pte = *ptep;
+       pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
+       pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
+       WRITE_ONCE(*ptep, pte);
+       dsb(ishst);
+
+       return (void *)slot->addr;
+}
+
+static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
+{
+       kvm_pte_t *ptep = slot->ptep;
+       u64 addr = slot->addr;
+
+       WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
+
+       /*
+        * Irritatingly, the architecture requires that we use inner-shareable
+        * broadcast TLB invalidation here in case another CPU speculates
+        * through our fixmap and decides to create an "amalgamation of the
+        * values held in the TLB" due to the apparent lack of a
+        * break-before-make sequence.
+        *
+        * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
+        */
+       dsb(ishst);
+       __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), (KVM_PGTABLE_MAX_LEVELS - 1));
+       dsb(ish);
+       isb();
+}
+
+void hyp_fixmap_unmap(void)
+{
+       fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
+}
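
As a usage illustration (a hypothetical helper, not part of this patch), the map/unmap pair is meant to bracket short, CPU-local accesses to a physical page that has no permanent EL2 mapping:

	/* Hypothetical sketch: zero a physical page by routing it through
	 * this CPU's fixmap slot. */
	static void hyp_zero_page_via_fixmap(phys_addr_t phys)
	{
		void *va = hyp_fixmap_map(phys);	/* program phys into the per-CPU slot */

		memset(va, 0, PAGE_SIZE);		/* the mapping is private to this CPU */
		hyp_fixmap_unmap();			/* clear the PTE and invalidate the TLB */
	}

Because the slot is per-CPU and the teardown performs the broadcast invalidation discussed above, the caller only has to ensure that the map and unmap happen on the same physical CPU.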
+
+static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
+                                  enum kvm_pgtable_walk_flags visit)
+{
+       struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
+
+       if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_MAX_LEVELS - 1)
+               return -EINVAL;
+
+       slot->addr = ctx->addr;
+       slot->ptep = ctx->ptep;
+
+       /*
+        * Clear the PTE, but keep the page-table page refcount elevated to
+        * prevent it from ever being freed. This lets us manipulate the PTEs
+        * by hand safely without ever needing to allocate memory.
+        */
+       fixmap_clear_slot(slot);
+
+       return 0;
+}
+
+static int create_fixmap_slot(u64 addr, u64 cpu)
+{
+       struct kvm_pgtable_walker walker = {
+               .cb     = __create_fixmap_slot_cb,
+               .flags  = KVM_PGTABLE_WALK_LEAF,
+               .arg = (void *)cpu,
+       };
+
+       return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
+}
+
+int hyp_create_pcpu_fixmap(void)
+{
+       unsigned long addr, i;
+       int ret;
+
+       for (i = 0; i < hyp_nr_cpus; i++) {
+               ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
+               if (ret)
+                       return ret;
+
+               ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
+                                         __hyp_pa(__hyp_bss_start), PAGE_HYP);
+               if (ret)
+                       return ret;
+
+               ret = create_fixmap_slot(addr, i);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
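
In other words, for each CPU the function above performs the following sequence (a descriptive summary of the code, not new behaviour):

	/*
	 * 1. pkvm_alloc_private_va_range(): reserve one page of private VA.
	 * 2. kvm_pgtable_hyp_map(..., __hyp_pa(__hyp_bss_start), PAGE_HYP):
	 *    populate every table level down to a last-level PTE; the page
	 *    actually mapped appears to be a placeholder, only the PTE needs
	 *    to exist.
	 * 3. create_fixmap_slot(): walk back to that PTE, record {addr, ptep}
	 *    in the CPU's hyp_fixmap_slot, then clear KVM_PTE_VALID while the
	 *    table page's refcount stays elevated, so the table can never be
	 *    freed and hyp_fixmap_map() never needs to allocate memory.
	 */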
+
 int hyp_create_idmap(u32 hyp_va_bits)
 {
        unsigned long start, end;
@@ -213,3 +339,36 @@ int hyp_create_idmap(u32 hyp_va_bits)
 
        return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
 }
+
+static void *admit_host_page(void *arg)
+{
+       struct kvm_hyp_memcache *host_mc = arg;
+
+       if (!host_mc->nr_pages)
+               return NULL;
+
+       /*
+        * The host still owns the pages in its memcache, so we need to go
+        * through a full host-to-hyp donation cycle to change it. Fortunately,
+        * __pkvm_host_donate_hyp() takes care of races for us, so if it
+        * succeeds we're good to go.
+        */
+       if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+               return NULL;
+
+       return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
+}
+
+/* Refill our local memcache by popping pages from the one provided by the host. */
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+                   struct kvm_hyp_memcache *host_mc)
+{
+       struct kvm_hyp_memcache tmp = *host_mc;
+       int ret;
+
+       ret =  __topup_hyp_memcache(mc, min_pages, admit_host_page,
+                                   hyp_virt_to_phys, &tmp);
+       *host_mc = tmp;
+
+       return ret;
+}
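
Finally, a hypothetical caller (the function and variable names below are illustrative and not part of this patch) would use refill_memcache() to make sure a hyp-owned memcache holds enough donated pages before allocating from it:

	/* Hypothetical usage sketch: top up 'hyp_mc' from the host-provided
	 * 'host_mc' before doing work that allocates from it. */
	static int prepare_hyp_alloc(struct kvm_hyp_memcache *hyp_mc,
				     struct kvm_hyp_memcache *host_mc,
				     unsigned long min_pages)
	{
		int ret = refill_memcache(hyp_mc, min_pages, host_mc);

		if (ret)
			return ret;	/* the host did not donate enough pages */

		/* ... allocate hyp data structures from hyp_mc ... */
		return 0;
	}

Note that *host_mc is written back even when the top-up fails, so the host's view of its memcache stays consistent with whatever pages were actually donated.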