Merge branch 'topic/ppc-kvm' into next
[linux-2.6-microblaze.git] / arch/powerpc/kvm/book3s_hv_nested.c
index c943a05..0644732 100644
@@ -261,8 +261,7 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
        /*
         * Don't let L1 change LPCR bits for the L2 except these:
         */
-       mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
-               LPCR_LPES | LPCR_MER;
+       mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;
 
        /*
         * Additional filtering is required depending on hardware
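Note: the hunk above drops LPCR_LPES from the set of LPCR bits L1 may choose for its L2, so L2 now inherits LPES from the LPCR L1 itself runs with. For context, the mask is consumed a few lines below this hunk, roughly as follows (a sketch from memory of load_l2_hv_regs(), not a verbatim quote of the file):

	/* Keep the L1-derived LPCR bits; accept only the whitelisted
	 * bits from the LPCR value L1 requested for its L2. */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
			(*lpcr & ~mask) | (l2_hv->lpcr & mask));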
@@ -439,10 +438,11 @@ long kvmhv_nested_init(void)
        if (!radix_enabled())
                return -ENODEV;
 
-       /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
-       ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
-       if (ptb_order < 8)
-               ptb_order = 8;
+       /* Partition table entry is 1<<4 bytes in size, hence the 4. */
+       ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
+       /* Minimum partition table size is 1<<12 bytes */
+       if (ptb_order < 12)
+               ptb_order = 12;
        pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
                                       GFP_KERNEL);
        if (!pseries_partition_tb) {
@@ -450,7 +450,7 @@ long kvmhv_nested_init(void)
                return -ENOMEM;
        }
 
-       ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+       ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
        rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
        if (rc != H_SUCCESS) {
                pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
@@ -521,11 +521,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
        kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
 }
 
-void kvmhv_vm_nested_init(struct kvm *kvm)
-{
-       kvm->arch.max_nested_lpid = -1;
-}
-
 /*
  * Handle the H_SET_PARTITION_TABLE hcall.
  * r4 = guest real address of partition table + log_2(size) - 12
@@ -539,16 +534,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
        long ret = H_SUCCESS;
 
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       /*
-        * Limit the partition table to 4096 entries (because that's what
-        * hardware supports), and check the base address.
-        */
-       if ((ptcr & PRTS_MASK) > 12 - 8 ||
+       /* Check partition size and base address. */
+       if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
            !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
                ret = H_PARAMETER;
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        if (ret == H_SUCCESS)
                kvm->arch.l1_ptcr = ptcr;
+
        return ret;
 }
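The rewritten check works because PRTS (the low bits of the L1-supplied ptcr) encodes log2(table size in bytes) - 12 and each entry is 1 << 4 bytes, so the number of LPIDs L1's table can index is 1 << (PRTS + 12 - 4). A hypothetical helper spelling that out (for exposition only, not part of the patch):

	/* Hypothetical: how many LPIDs fit in the table L1 registered. */
	static unsigned long l1_ptb_nr_lpids(unsigned long ptcr)
	{
		/* PRTS = log2(bytes) - 12; each entry is 1 << 4 bytes. */
		return 1ul << ((ptcr & PRTS_MASK) + 12 - 4);
	}

The new form rejects any table that could name an LPID at or above 1 << KVM_MAX_NESTED_GUESTS_SHIFT, replacing the old hard-coded 4096-entry limit; the same expression reappears in the bounds checks in the hunks below.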
 
@@ -644,7 +637,7 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
 
        ret = -EFAULT;
        ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
-       if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
+       if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
                int srcu_idx = srcu_read_lock(&kvm->srcu);
                ret = kvm_read_guest(kvm, ptbl_addr,
                                     &ptbl_entry, sizeof(ptbl_entry));
@@ -660,6 +653,35 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
        kvmhv_set_nested_ptbl(gp);
 }
 
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+       idr_init(&kvm->arch.kvm_nested_guest_idr);
+}
+
+static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
+{
+       return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static bool __prealloc_nested(struct kvm *kvm, int lpid)
+{
+       if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
+                               NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
+               return false;
+       return true;
+}
+
+static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
+{
+       if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
+               WARN_ON(1);
+}
+
+static void __remove_nested(struct kvm *kvm, int lpid)
+{
+       idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
 {
        struct kvm_nested_guest *gp;
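On the idr API used by the new helpers above (paraphrasing include/linux/idr.h; check the header for the authoritative contract): idr_alloc(idr, NULL, lpid, lpid + 1, GFP_KERNEL) returns lpid on success, -ENOSPC if the id is already occupied and -ENOMEM on allocation failure, so the single "!= lpid" test in __prealloc_nested() catches both. idr_replace() returns whatever pointer was previously stored, which for a merely reserved slot is NULL, so any other return in __add_nested() means the reserve-then-publish protocol was broken, hence the WARN_ON. The split into two helpers lets the sleeping GFP_KERNEL allocation happen before kvm->mmu_lock (a spinlock) is taken:

	/* Intended calling pattern (sketch; see kvmhv_get_nested() below). */
	if (!__prealloc_nested(kvm, lpid))	/* may sleep: GFP_KERNEL */
		return NULL;
	spin_lock(&kvm->mmu_lock);
	__add_nested(kvm, lpid, gp);		/* no allocation under the lock */
	spin_unlock(&kvm->mmu_lock);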
@@ -720,13 +742,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
        long ref;
 
        spin_lock(&kvm->mmu_lock);
-       if (gp == kvm->arch.nested_guests[lpid]) {
-               kvm->arch.nested_guests[lpid] = NULL;
-               if (lpid == kvm->arch.max_nested_lpid) {
-                       while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
-                               ;
-                       kvm->arch.max_nested_lpid = lpid;
-               }
+       if (gp == __find_nested(kvm, lpid)) {
+               __remove_nested(kvm, lpid);
                --gp->refcnt;
        }
        ref = gp->refcnt;
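Behaviour preserved above: the idr's reference is dropped only if the slot still maps this gp, keeping the old guard against a racing teardown. The max_nested_lpid walk-down is gone entirely because the idr tracks occupancy itself, so there is no high-water mark left to maintain.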
@@ -743,24 +760,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
  */
 void kvmhv_release_all_nested(struct kvm *kvm)
 {
-       int i;
+       int lpid;
        struct kvm_nested_guest *gp;
        struct kvm_nested_guest *freelist = NULL;
        struct kvm_memory_slot *memslot;
        int srcu_idx, bkt;
 
        spin_lock(&kvm->mmu_lock);
-       for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
-               gp = kvm->arch.nested_guests[i];
-               if (!gp)
-                       continue;
-               kvm->arch.nested_guests[i] = NULL;
+       idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+               __remove_nested(kvm, lpid);
                if (--gp->refcnt == 0) {
                        gp->next = freelist;
                        freelist = gp;
                }
        }
-       kvm->arch.max_nested_lpid = -1;
+       idr_destroy(&kvm->arch.kvm_nested_guest_idr);
+       /* idr is empty and may be reused at this point */
        spin_unlock(&kvm->mmu_lock);
        while ((gp = freelist) != NULL) {
                freelist = gp->next;
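idr_for_each_entry() tolerates this remove-while-iterating pattern because every step is a fresh lookup. The macro in include/linux/idr.h is essentially (quoted from memory; verify against the header):

	#define idr_for_each_entry(idr, entry, id)			\
		for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)

so __remove_nested() on the current entry cannot invalidate a cached cursor. The actual teardown still happens off the freelist once mmu_lock is dropped, as before, since releasing a nested guest involves more than a kfree().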
@@ -792,12 +807,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
 {
        struct kvm_nested_guest *gp, *newgp;
 
-       if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
-           l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+       if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
                return NULL;
 
        spin_lock(&kvm->mmu_lock);
-       gp = kvm->arch.nested_guests[l1_lpid];
+       gp = __find_nested(kvm, l1_lpid);
        if (gp)
                ++gp->refcnt;
        spin_unlock(&kvm->mmu_lock);
@@ -808,17 +822,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
        newgp = kvmhv_alloc_nested(kvm, l1_lpid);
        if (!newgp)
                return NULL;
+
+       if (!__prealloc_nested(kvm, l1_lpid)) {
+               kvmhv_release_nested(newgp);
+               return NULL;
+       }
+
        spin_lock(&kvm->mmu_lock);
-       if (kvm->arch.nested_guests[l1_lpid]) {
-               /* someone else beat us to it */
-               gp = kvm->arch.nested_guests[l1_lpid];
-       } else {
-               kvm->arch.nested_guests[l1_lpid] = newgp;
+       gp = __find_nested(kvm, l1_lpid);
+       if (!gp) {
+               __add_nested(kvm, l1_lpid, newgp);
                ++newgp->refcnt;
                gp = newgp;
                newgp = NULL;
-               if (l1_lpid > kvm->arch.max_nested_lpid)
-                       kvm->arch.max_nested_lpid = l1_lpid;
        }
        ++gp->refcnt;
        spin_unlock(&kvm->mmu_lock);
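Two points about this hunk: the reservation is taken before mmu_lock so the GFP_KERNEL allocation never runs under the spinlock, and on reservation failure the freshly built guest is released and the caller simply gets NULL. The in-lock re-check keeps the old "someone beat us to it" handling. The refcounts on the creation path, as I read the surrounding code:

	__add_nested(kvm, l1_lpid, newgp);
	++newgp->refcnt;	/* reference owned by the idr entry itself */
	...
	++gp->refcnt;		/* reference handed back to the caller     */

so a newly created nested guest leaves kvmhv_get_nested() with refcnt == 2, one for the idr and one for the caller.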
@@ -841,20 +857,13 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
                kvmhv_release_nested(gp);
 }
 
-static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
-{
-       if (lpid > kvm->arch.max_nested_lpid)
-               return NULL;
-       return kvm->arch.nested_guests[lpid];
-}
-
 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
                                 unsigned long ea, unsigned *hshift)
 {
        struct kvm_nested_guest *gp;
        pte_t *pte;
 
-       gp = kvmhv_find_nested(kvm, lpid);
+       gp = __find_nested(kvm, lpid);
        if (!gp)
                return NULL;
 
@@ -960,7 +969,7 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 
        gpa = n_rmap & RMAP_NESTED_GPA_MASK;
        lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
-       gp = kvmhv_find_nested(kvm, lpid);
+       gp = __find_nested(kvm, lpid);
        if (!gp)
                return;
 
@@ -1152,16 +1161,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
 {
        struct kvm *kvm = vcpu->kvm;
        struct kvm_nested_guest *gp;
-       int i;
+       int lpid;
 
        spin_lock(&kvm->mmu_lock);
-       for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
-               gp = kvm->arch.nested_guests[i];
-               if (gp) {
-                       spin_unlock(&kvm->mmu_lock);
-                       kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
-                       spin_lock(&kvm->mmu_lock);
-               }
+       idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+               spin_unlock(&kvm->mmu_lock);
+               kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+               spin_lock(&kvm->mmu_lock);
        }
        spin_unlock(&kvm->mmu_lock);
 }
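The lock is dropped around kvmhv_emulate_tlbie_lpid() exactly as the old array walk did. That is safe with idr_for_each_entry() because the cursor is a plain int and each iteration redoes the lookup; open-coded, the new loop is roughly:

	int lpid = 0;
	struct kvm_nested_guest *gp;

	spin_lock(&kvm->mmu_lock);
	while ((gp = idr_get_next(&kvm->arch.kvm_nested_guest_idr, &lpid))) {
		spin_unlock(&kvm->mmu_lock);
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		spin_lock(&kvm->mmu_lock);
		lpid++;		/* resume the search after this entry */
	}
	spin_unlock(&kvm->mmu_lock);

Guests removed while the lock was dropped are simply not found on the next lookup, the same exposure the array version had.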
@@ -1313,7 +1319,7 @@ long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
         * H_ENTER_NESTED call. Since we can't differentiate this case from
         * the invalid case, we ignore such flush requests and return success.
         */
-       if (!kvmhv_find_nested(vcpu->kvm, lpid))
+       if (!__find_nested(vcpu->kvm, lpid))
                return H_SUCCESS;
 
        /*
@@ -1657,15 +1663,12 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
 
 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
 {
-       int ret = -1;
+       int ret = lpid + 1;
 
        spin_lock(&kvm->mmu_lock);
-       while (++lpid <= kvm->arch.max_nested_lpid) {
-               if (kvm->arch.nested_guests[lpid]) {
-                       ret = lpid;
-                       break;
-               }
-       }
+       if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
+               ret = -1;
        spin_unlock(&kvm->mmu_lock);
+
        return ret;
 }
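idr_get_next() searches for the first populated id >= *ret and writes the id it found back through the pointer, which is why ret is seeded with lpid + 1 and can be returned directly on success. Illustrative behaviour (hypothetical LPID values):

	/* With nested guests registered at LPIDs 3 and 7:
	 *   kvmhv_nested_next_lpid(kvm, -1) returns 3
	 *   kvmhv_nested_next_lpid(kvm, 3)  returns 7
	 *   kvmhv_nested_next_lpid(kvm, 7)  returns -1
	 */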