Merge tag 'powerpc-5.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6f612d2..13bad6b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -53,6 +53,7 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
+#include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -134,7 +135,7 @@ static inline bool nesting_enabled(struct kvm *kvm)
 }
 
 /* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix;
+static bool no_mixing_hpt_and_radix __read_mostly;
 
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
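Tagging no_mixing_hpt_and_radix as __read_mostly (from <linux/cache.h>) is the usual pattern for a flag that is written once at init and read on hot paths: it lands in the .data..read_mostly section, away from cache lines that see frequent writes. A minimal sketch of the pattern, using a hypothetical flag and init function:

	#include <linux/cache.h>
	#include <linux/init.h>

	/* Hypothetical flag: written once below, read on every hot path. */
	static bool example_flag __read_mostly;

	static int __init example_init(void)
	{
		example_flag = true;	/* the only write, at init time */
		return 0;
	}
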
@@ -782,8 +783,24 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
                        return H_UNSUPPORTED_FLAG_START;
                if (value2 & DABRX_HYP)
                        return H_P4;
-               vcpu->arch.dawr  = value1;
-               vcpu->arch.dawrx = value2;
+               vcpu->arch.dawr0  = value1;
+               vcpu->arch.dawrx0 = value2;
+               return H_SUCCESS;
+       case H_SET_MODE_RESOURCE_SET_DAWR1:
+               if (!kvmppc_power8_compatible(vcpu))
+                       return H_P2;
+               if (!ppc_breakpoint_available())
+                       return H_P2;
+               if (!cpu_has_feature(CPU_FTR_DAWR1))
+                       return H_P2;
+               if (!vcpu->kvm->arch.dawr1_enabled)
+                       return H_FUNCTION;
+               if (mflags)
+                       return H_UNSUPPORTED_FLAG_START;
+               if (value2 & DABRX_HYP)
+                       return H_P4;
+               vcpu->arch.dawr1  = value1;
+               vcpu->arch.dawrx1 = value2;
                return H_SUCCESS;
        case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
                /* KVM does not support mflags=2 (AIL=2) */
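The new H_SET_MODE_RESOURCE_SET_DAWR1 case mirrors the DAWR0 handler above it, with two extra gates: the host CPU must have CPU_FTR_DAWR1, and userspace must have opted in via kvm->arch.dawr1_enabled (otherwise the guest sees H_FUNCTION). From a pseries guest, the call would go through the existing plpar_set_mode() hypercall wrapper; a hedged sketch, with a hypothetical helper name:

	#include <asm/hvcall.h>
	#include <asm/plpar_wrappers.h>
	#include <asm/reg.h>

	/* Hypothetical guest-side helper: program the second watchpoint.
	 * mflags must be 0 and DABRX_HYP must be clear, or the handler
	 * above fails with H_UNSUPPORTED_FLAG_START / H_P4. */
	static long example_set_dawr1(unsigned long addr, unsigned long dawrx)
	{
		return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1,
				      addr, dawrx & ~DABRX_HYP);
	}
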
@@ -1759,10 +1776,16 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                *val = get_reg_val(id, vcpu->arch.vcore->vtb);
                break;
        case KVM_REG_PPC_DAWR:
-               *val = get_reg_val(id, vcpu->arch.dawr);
+               *val = get_reg_val(id, vcpu->arch.dawr0);
                break;
        case KVM_REG_PPC_DAWRX:
-               *val = get_reg_val(id, vcpu->arch.dawrx);
+               *val = get_reg_val(id, vcpu->arch.dawrx0);
+               break;
+       case KVM_REG_PPC_DAWR1:
+               *val = get_reg_val(id, vcpu->arch.dawr1);
+               break;
+       case KVM_REG_PPC_DAWRX1:
+               *val = get_reg_val(id, vcpu->arch.dawrx1);
                break;
        case KVM_REG_PPC_CIABR:
                *val = get_reg_val(id, vcpu->arch.ciabr);
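Userspace reads the new registers through KVM's one-reg API; a sketch, assuming vcpu_fd is an open KVM vCPU file descriptor:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: fetch the second watchpoint address register. */
	static int example_get_dawr1(int vcpu_fd, uint64_t *val)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_DAWR1,
			.addr = (uintptr_t)val,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}

KVM_SET_ONE_REG is symmetric; as the next hunk shows, KVM masks out DAWRX_HYP on the write path so a guest watchpoint can never match in hypervisor state.
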
@@ -1991,10 +2014,16 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
                vcpu->arch.vcore->vtb = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_DAWR:
-               vcpu->arch.dawr = set_reg_val(id, *val);
+               vcpu->arch.dawr0 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_DAWRX:
-               vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+               vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+               break;
+       case KVM_REG_PPC_DAWR1:
+               vcpu->arch.dawr1 = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_DAWRX1:
+               vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
                break;
        case KVM_REG_PPC_CIABR:
                vcpu->arch.ciabr = set_reg_val(id, *val);
@@ -2862,11 +2891,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
        if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
                return false;
 
-       /* Some POWER9 chips require all threads to be in the same MMU mode */
-       if (no_mixing_hpt_and_radix &&
-           kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
-               return false;
-
        if (n_threads < cip->max_subcore_threads)
                n_threads = cip->max_subcore_threads;
        if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
@@ -2905,6 +2929,9 @@ static void prepare_threads(struct kvmppc_vcore *vc)
        for_each_runnable_thread(i, vcpu, vc) {
                if (signal_pending(vcpu->arch.run_task))
                        vcpu->arch.ret = -EINTR;
+               else if (no_mixing_hpt_and_radix &&
+                        kvm_is_radix(vc->kvm) != radix_enabled())
+                       vcpu->arch.ret = -EINVAL;
                else if (vcpu->arch.vpa.update_pending ||
                         vcpu->arch.slb_shadow.update_pending ||
                         vcpu->arch.dtl.update_pending)
@@ -3110,7 +3137,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        int controlled_threads;
        int trap;
        bool is_power8;
-       bool hpt_on_radix;
 
        /*
         * Remove from the list any threads that have a signal pending
@@ -3142,12 +3169,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
-        * On POWER9, we need to be not in independent-threads mode if
-        * this is a HPT guest on a radix host machine where the
-        * CPU threads may not be in different MMU modes.
         */
-       hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
-               !kvm_is_radix(vc->kvm);
-       if (((controlled_threads > 1) &&
-            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
-           (hpt_on_radix && vc->kvm->arch.threads_indep)) {
+       if ((controlled_threads > 1) &&
+           ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
                for_each_runnable_thread(i, vcpu, vc) {
                        vcpu->arch.ret = -EBUSY;
                        kvmppc_remove_runnable(vc, vcpu);
@@ -3215,7 +3238,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
                && !cpu_has_feature(CPU_FTR_ARCH_300);
 
-       if (split > 1 || hpt_on_radix) {
+       if (split > 1) {
                sip = &split_info;
                memset(&split_info, 0, sizeof(split_info));
                for (sub = 0; sub < core_info.n_subcores; ++sub)
@@ -3237,13 +3260,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                        split_info.subcore_size = subcore_size;
                } else {
                        split_info.subcore_size = 1;
-                       if (hpt_on_radix) {
-                               /* Use the split_info for LPCR/LPIDR changes */
-                               split_info.lpcr_req = vc->lpcr;
-                               split_info.lpidr_req = vc->kvm->arch.lpid;
-                               split_info.host_lpcr = vc->kvm->arch.host_lpcr;
-                               split_info.do_set = 1;
-                       }
                }
 
                /* order writes to split_info before kvm_split_mode pointer */
@@ -3253,7 +3269,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        for (thr = 0; thr < controlled_threads; ++thr) {
                struct paca_struct *paca = paca_ptrs[pcpu + thr];
 
-               paca->kvm_hstate.tid = thr;
                paca->kvm_hstate.napping = 0;
                paca->kvm_hstate.kvm_split_mode = sip;
        }
@@ -3327,10 +3342,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         * When doing micro-threading, poke the inactive threads as well.
         * This gets them to the nap instruction after kvm_do_nap,
         * which reduces the time taken to unsplit later.
-        * For POWER9 HPT guest on radix host, we need all the secondary
-        * threads woken up so they can do the LPCR/LPIDR change.
         */
-       if (cmd_bit || hpt_on_radix) {
+       if (cmd_bit) {
                split_info.do_nap = 1;  /* ask secondaries to nap when done */
                for (thr = 1; thr < threads_per_subcore; ++thr)
                        if (!(active & (1 << thr)))
@@ -3391,24 +3404,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                        cpu_relax();
                        ++loops;
                }
-       } else if (hpt_on_radix) {
-               /* Wait for all threads to have seen final sync */
-               for (thr = 1; thr < controlled_threads; ++thr) {
-                       struct paca_struct *paca = paca_ptrs[pcpu + thr];
-
-                       while (paca->kvm_hstate.kvm_split_mode) {
-                               HMT_low();
-                               barrier();
-                       }
-                       HMT_medium();
-               }
+               split_info.do_nap = 0;
        }
-       split_info.do_nap = 0;
 
        kvmppc_set_host_core(pcpu);
 
+       guest_exit_irqoff();
+
        local_irq_enable();
-       guest_exit();
 
        /* Let secondaries go back to the offline loop */
        for (i = 0; i < controlled_threads; ++i) {
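Replacing guest_exit() after local_irq_enable() with guest_exit_irqoff() before it satisfies the context-tracking requirement that the guest-exit hook run with IRQs hard-disabled, and it means any interrupt taken from here on is accounted to the host rather than to guest time. A minimal sketch of the ordering, assuming a kernel context where IRQs are still off after the guest run:

	#include <linux/context_tracking.h>
	#include <linux/irqflags.h>

	/* Sketch: leave guest context while IRQs are still hard-disabled,
	 * then re-enable them; later interrupts are charged to the host. */
	static void example_exit_path(void)
	{
		guest_exit_irqoff();	/* context-tracking exit, IRQs off */
		local_irq_enable();	/* host interrupts allowed again */
	}
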
@@ -3449,10 +3452,17 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
        int trap;
        unsigned long host_hfscr = mfspr(SPRN_HFSCR);
        unsigned long host_ciabr = mfspr(SPRN_CIABR);
-       unsigned long host_dawr = mfspr(SPRN_DAWR0);
-       unsigned long host_dawrx = mfspr(SPRN_DAWRX0);
+       unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
+       unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
        unsigned long host_psscr = mfspr(SPRN_PSSCR);
        unsigned long host_pidr = mfspr(SPRN_PID);
+       unsigned long host_dawr1 = 0;
+       unsigned long host_dawrx1 = 0;
+
+       if (cpu_has_feature(CPU_FTR_DAWR1)) {
+               host_dawr1 = mfspr(SPRN_DAWR1);
+               host_dawrx1 = mfspr(SPRN_DAWRX1);
+       }
 
        /*
         * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
@@ -3489,8 +3499,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
        mtspr(SPRN_SPURR, vcpu->arch.spurr);
 
        if (dawr_enabled()) {
-               mtspr(SPRN_DAWR0, vcpu->arch.dawr);
-               mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
+               mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+               mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+               if (cpu_has_feature(CPU_FTR_DAWR1)) {
+                       mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+                       mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+               }
        }
        mtspr(SPRN_CIABR, vcpu->arch.ciabr);
        mtspr(SPRN_IC, vcpu->arch.ic);
@@ -3542,8 +3556,12 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
              (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
        mtspr(SPRN_HFSCR, host_hfscr);
        mtspr(SPRN_CIABR, host_ciabr);
-       mtspr(SPRN_DAWR0, host_dawr);
-       mtspr(SPRN_DAWRX0, host_dawrx);
+       mtspr(SPRN_DAWR0, host_dawr0);
+       mtspr(SPRN_DAWRX0, host_dawrx0);
+       if (cpu_has_feature(CPU_FTR_DAWR1)) {
+               mtspr(SPRN_DAWR1, host_dawr1);
+               mtspr(SPRN_DAWRX1, host_dawrx1);
+       }
        mtspr(SPRN_PID, host_pidr);
 
        /*
@@ -3595,6 +3613,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
        unsigned long host_tidr = mfspr(SPRN_TIDR);
        unsigned long host_iamr = mfspr(SPRN_IAMR);
        unsigned long host_amr = mfspr(SPRN_AMR);
+       unsigned long host_fscr = mfspr(SPRN_FSCR);
        s64 dec;
        u64 tb;
        int trap, save_pmu;
@@ -3735,6 +3754,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
        if (host_amr != vcpu->arch.amr)
                mtspr(SPRN_AMR, host_amr);
 
+       if (host_fscr != vcpu->arch.fscr)
+               mtspr(SPRN_FSCR, host_fscr);
+
        msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
        store_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4173,7 +4195,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        kvmppc_clear_host_core(pcpu);
 
-       local_paca->kvm_hstate.tid = 0;
        local_paca->kvm_hstate.napping = 0;
        local_paca->kvm_hstate.kvm_split_mode = NULL;
        kvmppc_start_thread(vcpu, vc);
@@ -4217,8 +4238,9 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        kvmppc_set_host_core(pcpu);
 
+       guest_exit_irqoff();
+
        local_irq_enable();
-       guest_exit();
 
        cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
 
@@ -4358,15 +4380,11 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
 
        do {
                /*
-                * The early POWER9 chips that can't mix radix and HPT threads
-                * on the same core also need the workaround for the problem
-                * where the TLB would prefetch entries in the guest exit path
-                * for radix guests using the guest PIDR value and LPID 0.
-                * The workaround is in the old path (kvmppc_run_vcpu())
-                * but not the new path (kvmhv_run_single_vcpu()).
+                * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
+                * path, which also handles hash and dependent threads mode.
                 */
                if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
-                   !no_mixing_hpt_and_radix)
+                   !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
@@ -5599,6 +5617,26 @@ out:
        return ret;
 }
 
+static int kvmhv_enable_dawr1(struct kvm *kvm)
+{
+       if (!cpu_has_feature(CPU_FTR_DAWR1))
+               return -ENODEV;
+
+       /* kvm == NULL means the caller is testing if the capability exists */
+       if (kvm)
+               kvm->arch.dawr1_enabled = true;
+       return 0;
+}
+
+static bool kvmppc_hash_v3_possible(void)
+{
+       if (radix_enabled() && no_mixing_hpt_and_radix)
+               return false;
+
+       return cpu_has_feature(CPU_FTR_ARCH_300) &&
+               cpu_has_feature(CPU_FTR_HVMODE);
+}
+
 static struct kvmppc_ops kvm_ops_hv = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
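kvmhv_enable_dawr1() backs a VM-wide capability: until userspace enables it, the H_SET_MODE handler above rejects DAWR1 with H_FUNCTION. The kvm == NULL convention lets the same callback answer a plain capability query. A userspace sketch, assuming vm_fd is an open VM file descriptor and the KVM_CAP_PPC_DAWR1 capability from the same series:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: probe, then opt in to the second DAWR. The opt-in keeps
	 * the new registers hidden from userspace that cannot migrate them. */
	static int example_enable_dawr1(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_DAWR1 };

		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_DAWR1) <= 0)
			return -1;	/* host lacks CPU_FTR_DAWR1 */

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}
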
@@ -5642,6 +5680,8 @@ static struct kvmppc_ops kvm_ops_hv = {
        .store_to_eaddr = kvmhv_store_to_eaddr,
        .enable_svm = kvmhv_enable_svm,
        .svm_off = kvmhv_svm_off,
+       .enable_dawr1 = kvmhv_enable_dawr1,
+       .hash_v3_possible = kvmppc_hash_v3_possible,
 };
 
 static int kvm_init_subcore_bitmap(void)