KVM: stats: Separate generic stats from architecture specific ones
[linux-2.6-microblaze.git]

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index acb0c72..cd544a4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
 #include <asm/kvm_book3s_uvmem.h>
 #include <asm/ultravisor.h>
 #include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
 
 #include "book3s.h"
 
@@ -130,9 +131,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
        return kvm->arch.nested_enable && kvm_is_radix(kvm);
 }
 
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -232,7 +230,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
        waitp = kvm_arch_vcpu_get_wait(vcpu);
        if (rcuwait_wake_up(waitp))
-               ++vcpu->stat.halt_wakeup;
+               ++vcpu->stat.generic.halt_wakeup;
 
        cpu = READ_ONCE(vcpu->arch.thread_cpu);
        if (cpu >= 0 && kvmppc_ipi_thread(cpu))
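The counters touched in this file (halt_wakeup, halt_attempted_poll, halt_successful_poll, halt_poll_success_ns, halt_poll_fail_ns) move from the powerpc-specific vcpu stats into a sub-struct shared by all architectures, which is why every accessor gains a .generic step. A rough sketch of the resulting layout, limited to the fields this file uses (the real definitions live in include/linux/kvm_types.h and arch/powerpc/include/asm/kvm_host.h):

    /* Sketch only -- fields not referenced in this file are omitted. */
    struct kvm_vcpu_stat_generic {
            u64 halt_attempted_poll;
            u64 halt_successful_poll;
            u64 halt_wakeup;
            u64 halt_poll_success_ns;
            u64 halt_poll_fail_ns;
    };

    struct kvm_vcpu_stat {
            struct kvm_vcpu_stat_generic generic;  /* common to all architectures */
            /* ... powerpc-specific counters (sum_exits, dec_exits, ...) ... */
    };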
@@ -925,8 +923,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
        return yield_count;
 }
 
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+       unsigned long type = kvmppc_get_gpr(vcpu, 6);
+       unsigned long pid, pg_sizes, start, end;
+
+       /*
+        * The partition-scoped invalidations aren't handled here in L0.
+        */
+       if (type & H_RPTI_TYPE_NESTED)
+               return RESUME_HOST;
+
+       pid = kvmppc_get_gpr(vcpu, 4);
+       pg_sizes = kvmppc_get_gpr(vcpu, 7);
+       start = kvmppc_get_gpr(vcpu, 8);
+       end = kvmppc_get_gpr(vcpu, 9);
+
+       do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+                               type, pg_sizes, start, end);
+
+       kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+       return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+                                   unsigned long id, unsigned long target,
+                                   unsigned long type, unsigned long pg_sizes,
+                                   unsigned long start, unsigned long end)
+{
+       if (!kvm_is_radix(vcpu->kvm))
+               return H_UNSUPPORTED;
+
+       if (end < start)
+               return H_P5;
+
+       /*
+        * Partition-scoped invalidation for nested guests.
+        */
+       if (type & H_RPTI_TYPE_NESTED) {
+               if (!nesting_enabled(vcpu->kvm))
+                       return H_FUNCTION;
+
+               /* Support only cores as target */
+               if (target != H_RPTI_TARGET_CMMU)
+                       return H_P2;
+
+               return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+                                              start, end);
+       }
+
+       /*
+        * Process-scoped invalidation for L1 guests.
+        */
+       do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+                               type, pg_sizes, start, end);
+       return H_SUCCESS;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
+       struct kvm *kvm = vcpu->kvm;
        unsigned long req = kvmppc_get_gpr(vcpu, 3);
        unsigned long target, ret = H_SUCCESS;
        int yield_count;
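The new handlers above pull the H_RPT_INVALIDATE arguments straight out of the hcall GPRs: r4 carries the PID (or LPID for nested partition-scoped requests), r5 the target, r6 the type flags, r7 the page-size mask, and r8/r9 the effective-address range. For orientation, a guest-side caller would look roughly like the sketch below; this is illustrative only and not part of the commit, with the constant names taken from asm/hvcall.h and the real pseries wrapper living behind the asm/plpar_wrappers.h include added at the top of this file.

    /* Illustrative sketch: process-scoped invalidation of one PID's TLB
     * entries over the whole effective-address range.
     */
    static long example_rpt_invalidate_pid(unsigned long pid)
    {
            return plpar_hcall_norets(H_RPT_INVALIDATE,
                                      pid,                  /* r4: PID           */
                                      H_RPTI_TARGET_CMMU,   /* r5: target MMU    */
                                      H_RPTI_TYPE_TLB,      /* r6: what to flush */
                                      H_RPTI_PAGE_ALL,      /* r7: page sizes    */
                                      0UL, -1UL);           /* r8/r9: EA range   */
    }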
@@ -938,11 +999,57 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                return RESUME_HOST;
 
        switch (req) {
+       case H_REMOVE:
+               ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_ENTER:
+               ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6),
+                                       kvmppc_get_gpr(vcpu, 7));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_READ:
+               ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_CLEAR_MOD:
+               ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_CLEAR_REF:
+               ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_PROTECT:
+               ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                       kvmppc_get_gpr(vcpu, 5),
+                                       kvmppc_get_gpr(vcpu, 6));
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+       case H_BULK_REMOVE:
+               ret = kvmppc_h_bulk_remove(vcpu);
+               if (ret == H_TOO_HARD)
+                       return RESUME_HOST;
+               break;
+
        case H_CEDE:
                break;
        case H_PROD:
                target = kvmppc_get_gpr(vcpu, 4);
-               tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+               tvcpu = kvmppc_find_vcpu(kvm, target);
                if (!tvcpu) {
                        ret = H_PARAMETER;
                        break;
@@ -956,7 +1063,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                target = kvmppc_get_gpr(vcpu, 4);
                if (target == -1)
                        break;
-               tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+               tvcpu = kvmppc_find_vcpu(kvm, target);
                if (!tvcpu) {
                        ret = H_PARAMETER;
                        break;
@@ -972,12 +1079,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                                        kvmppc_get_gpr(vcpu, 6));
                break;
        case H_RTAS:
-               if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+               if (list_empty(&kvm->arch.rtas_tokens))
                        return RESUME_HOST;
 
-               idx = srcu_read_lock(&vcpu->kvm->srcu);
+               idx = srcu_read_lock(&kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
-               srcu_read_unlock(&vcpu->kvm->srcu, idx);
+               srcu_read_unlock(&kvm->srcu, idx);
 
                if (rc == -ENOENT)
                        return RESUME_HOST;
@@ -1061,15 +1168,23 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
                        ret = H_HARDWARE;
                break;
+       case H_RPT_INVALIDATE:
+               ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                             kvmppc_get_gpr(vcpu, 5),
+                                             kvmppc_get_gpr(vcpu, 6),
+                                             kvmppc_get_gpr(vcpu, 7),
+                                             kvmppc_get_gpr(vcpu, 8),
+                                             kvmppc_get_gpr(vcpu, 9));
+               break;
 
        case H_SET_PARTITION_TABLE:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_set_partition_table(vcpu);
                break;
        case H_ENTER_NESTED:
                ret = H_FUNCTION;
-               if (!nesting_enabled(vcpu->kvm))
+               if (!nesting_enabled(kvm))
                        break;
                ret = kvmhv_enter_nested_guest(vcpu);
                if (ret == H_INTERRUPT) {
@@ -1084,12 +1199,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                break;
        case H_TLB_INVALIDATE:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_do_nested_tlbie(vcpu);
                break;
        case H_COPY_TOFROM_GUEST:
                ret = H_FUNCTION;
-               if (nesting_enabled(vcpu->kvm))
+               if (nesting_enabled(kvm))
                        ret = kvmhv_copy_tofrom_guest_nested(vcpu);
                break;
        case H_PAGE_INIT:
@@ -1100,7 +1215,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_PAGE_IN:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_page_in(vcpu->kvm,
+                       ret = kvmppc_h_svm_page_in(kvm,
                                                   kvmppc_get_gpr(vcpu, 4),
                                                   kvmppc_get_gpr(vcpu, 5),
                                                   kvmppc_get_gpr(vcpu, 6));
@@ -1108,7 +1223,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_PAGE_OUT:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_page_out(vcpu->kvm,
+                       ret = kvmppc_h_svm_page_out(kvm,
                                                    kvmppc_get_gpr(vcpu, 4),
                                                    kvmppc_get_gpr(vcpu, 5),
                                                    kvmppc_get_gpr(vcpu, 6));
@@ -1116,12 +1231,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
        case H_SVM_INIT_START:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_init_start(vcpu->kvm);
+                       ret = kvmppc_h_svm_init_start(kvm);
                break;
        case H_SVM_INIT_DONE:
                ret = H_UNSUPPORTED;
                if (kvmppc_get_srr1(vcpu) & MSR_S)
-                       ret = kvmppc_h_svm_init_done(vcpu->kvm);
+                       ret = kvmppc_h_svm_init_done(kvm);
                break;
        case H_SVM_INIT_ABORT:
                /*
@@ -1131,12 +1246,13 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                 * Instead the kvm->arch.secure_guest flag is checked inside
                 * kvmppc_h_svm_init_abort().
                 */
-               ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+               ret = kvmppc_h_svm_init_abort(kvm);
                break;
 
        default:
                return RESUME_HOST;
        }
+       WARN_ON_ONCE(ret == H_TOO_HARD);
        kvmppc_set_gpr(vcpu, 3, ret);
        vcpu->arch.hcall_needed = 0;
        return RESUME_GUEST;
@@ -1180,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
        case H_XIRR_X:
 #endif
        case H_PAGE_INIT:
+       case H_RPT_INVALIDATE:
                return 1;
        }
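Listing H_RPT_INVALIDATE here marks it as an hcall whose in-kernel handling userspace can toggle per VM through KVM_CAP_PPC_ENABLE_HCALL. A hedged sketch of how a VMM would do that (vm_fd from KVM_CREATE_VM and error handling assumed; the hcall number constant lives in the kernel's asm/hvcall.h rather than the exported uapi headers):

    /* Illustrative fragment only. */
    struct kvm_enable_cap cap = {
            .cap  = KVM_CAP_PPC_ENABLE_HCALL,
            .args = { H_RPT_INVALIDATE, 1 },  /* args[0] = hcall, args[1] = enable */
    };
    if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
            perror("KVM_ENABLE_CAP");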
 
@@ -1409,13 +1526,23 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                         * Guest userspace executed sc 1. This can only be
                         * reached by the P9 path because the old path
                         * handles this case in realmode hcall handlers.
-                        *
-                        * Radix guests can not run PR KVM or nested HV hash
-                        * guests which might run PR KVM, so this is always
-                        * a privilege fault. Send a program check to guest
-                        * kernel.
                         */
-                       kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+                       if (!kvmhv_vcpu_is_radix(vcpu)) {
+                               /*
+                                * A guest could be running PR KVM, so this
+                                * may be a PR KVM hcall. It must be reflected
+                                * to the guest kernel as a sc interrupt.
+                                */
+                               kvmppc_core_queue_syscall(vcpu);
+                       } else {
+                               /*
+                                * Radix guests can not run PR KVM or nested HV
+                                * hash guests which might run PR KVM, so this
+                                * is always a privilege fault. Send a program
+                                * check to guest kernel.
+                                */
+                               kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+                       }
                        r = RESUME_GUEST;
                        break;
                }
@@ -1437,22 +1564,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
         * We get these next two if the guest accesses a page which it thinks
         * it has mapped but which is not actually present, either because
         * it is for an emulated I/O device or because the corresponding
-        * host page has been paged out.  Any other HDSI/HISI interrupts
-        * have been handled already.
+        * host page has been paged out.
+        *
+        * Any other HDSI/HISI interrupts have been handled already for P7/8
+        * guests. For POWER9 hash guests not using rmhandlers, basic hash
+        * fault handling is done here.
         */
-       case BOOK3S_INTERRUPT_H_DATA_STORAGE:
-               r = RESUME_PAGE_FAULT;
-               if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
+       case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+               unsigned long vsid;
+               long err;
+
+               if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
                        r = RESUME_GUEST; /* Just retry if it's the canary */
+                       break;
+               }
+
+               if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+                       /*
+                        * Radix doesn't require anything, and pre-ISAv3.0 hash
+                        * already attempted to handle this in rmhandlers. The
+                        * hash fault handling below is v3 only (it uses ASDR
+                        * via fault_gpa).
+                        */
+                       r = RESUME_PAGE_FAULT;
+                       break;
+               }
+
+               if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+                       kvmppc_core_queue_data_storage(vcpu,
+                               vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               if (!(vcpu->arch.shregs.msr & MSR_DR))
+                       vsid = vcpu->kvm->arch.vrma_slb_v;
+               else
+                       vsid = vcpu->arch.fault_gpa;
+
+               err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+                               vsid, vcpu->arch.fault_dsisr, true);
+               if (err == 0) {
+                       r = RESUME_GUEST;
+               } else if (err == -1 || err == -2) {
+                       r = RESUME_PAGE_FAULT;
+               } else {
+                       kvmppc_core_queue_data_storage(vcpu,
+                               vcpu->arch.fault_dar, err);
+                       r = RESUME_GUEST;
+               }
                break;
-       case BOOK3S_INTERRUPT_H_INST_STORAGE:
+       }
+       case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+               unsigned long vsid;
+               long err;
+
                vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
                vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
                        DSISR_SRR1_MATCH_64S;
-               if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
-                       vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
-               r = RESUME_PAGE_FAULT;
+               if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+                       /*
+                        * Radix doesn't require anything, and pre-ISAv3.0 hash
+                        * already attempted to handle this in rmhandlers. The
+                        * hash fault handling below is v3 only (it uses ASDR
+                        * via fault_gpa).
+                        */
+                       if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+                               vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+                       r = RESUME_PAGE_FAULT;
+                       break;
+               }
+
+               if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+                       kvmppc_core_queue_inst_storage(vcpu,
+                               vcpu->arch.fault_dsisr);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               if (!(vcpu->arch.shregs.msr & MSR_IR))
+                       vsid = vcpu->kvm->arch.vrma_slb_v;
+               else
+                       vsid = vcpu->arch.fault_gpa;
+
+               err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+                               vsid, vcpu->arch.fault_dsisr, false);
+               if (err == 0) {
+                       r = RESUME_GUEST;
+               } else if (err == -1) {
+                       r = RESUME_PAGE_FAULT;
+               } else {
+                       kvmppc_core_queue_inst_storage(vcpu, err);
+                       r = RESUME_GUEST;
+               }
                break;
+       }
+
        /*
         * This occurs if the guest executes an illegal instruction.
         * If the guest debug is disabled, generate a program interrupt
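The error handling in both new cases leans on the return convention of kvmppc_hpte_hv_fault(), documented above its definition in book3s_hv_rm_mmu.c; as assumed here it is roughly:

    /* Assumed return convention of kvmppc_hpte_hv_fault():
     *    0  -> fault handled, re-enter the guest (RESUME_GUEST)
     *   -1  -> let the host page-fault path deal with it (RESUME_PAGE_FAULT)
     *   -2  -> as -1, but the faulting instruction word is also needed
     *          (MMIO emulation; only relevant to the data-storage case)
     *  else -> a DSISR/SRR1-style status to reflect back into the guest
     */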
@@ -1613,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                if (!xics_on_xive())
                        kvmppc_xics_rm_complete(vcpu, 0);
                break;
+       case BOOK3S_INTERRUPT_SYSCALL:
+       {
+               unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+               /*
+                * The H_RPT_INVALIDATE hcalls issued by nested
+                * guests for process-scoped invalidations when
+                * GTSE=0 are handled here in L0.
+                */
+               if (req == H_RPT_INVALIDATE) {
+                       r = kvmppc_nested_h_rpt_invalidate(vcpu);
+                       break;
+               }
+
+               r = RESUME_HOST;
+               break;
+       }
        default:
                r = RESUME_HOST;
                break;
@@ -2685,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
        cpumask_t *cpu_in_guest;
        int i;
 
-       cpu = cpu_first_thread_sibling(cpu);
+       cpu = cpu_first_tlb_thread_sibling(cpu);
        if (nested) {
                cpumask_set_cpu(cpu, &nested->need_tlb_flush);
                cpu_in_guest = &nested->cpu_in_guest;
@@ -2699,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
         * the other side is the first smp_mb() in kvmppc_run_core().
         */
        smp_mb();
-       for (i = 0; i < threads_per_core; ++i)
-               if (cpumask_test_cpu(cpu + i, cpu_in_guest))
-                       smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+       for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+                                       i += cpu_tlb_thread_sibling_step())
+               if (cpumask_test_cpu(i, cpu_in_guest))
+                       smp_call_function_single(i, do_nothing, NULL, 1);
 }
 
 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
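The switch from cpu_first_thread_sibling() to the *_tlb_thread_sibling() helpers, and the stepped loop above, reflect that on big-core SMT8 parts the two fused SMT4 halves keep separate TLBs, with their threads interleaved in the Linux CPU numbering, so only every second thread of a core shares a TLB. A sketch of the assumed semantics (the real helpers are in arch/powerpc/include/asm/cputhreads.h and gate on the big-core feature, so the details may differ):

    /* Sketch only: first TLB sibling of a CPU under the big-core assumption. */
    static inline int example_first_tlb_sibling(int cpu, bool big_core_smt8)
    {
            /* Big core: even threads share one TLB, odd threads the other. */
            return big_core_smt8 ? (cpu & ~0x6) : cpu_first_thread_sibling(cpu);
    }
    /* e.g. big-core cpu 5 -> TLB siblings 1, 3, 5, 7 (step 2, last = cpu | 0x6) */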
@@ -2732,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
         */
        if (prev_cpu != pcpu) {
                if (prev_cpu >= 0 &&
-                   cpu_first_thread_sibling(prev_cpu) !=
-                   cpu_first_thread_sibling(pcpu))
+                   cpu_first_tlb_thread_sibling(prev_cpu) !=
+                   cpu_first_tlb_thread_sibling(pcpu))
                        radix_flush_cpu(kvm, prev_cpu, vcpu);
                if (nested)
                        nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
@@ -2995,9 +3220,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
        for_each_runnable_thread(i, vcpu, vc) {
                if (signal_pending(vcpu->arch.run_task))
                        vcpu->arch.ret = -EINTR;
-               else if (no_mixing_hpt_and_radix &&
-                        kvm_is_radix(vc->kvm) != radix_enabled())
-                       vcpu->arch.ret = -EINVAL;
                else if (vcpu->arch.vpa.update_pending ||
                         vcpu->arch.slb_shadow.update_pending ||
                         vcpu->arch.dtl.update_pending)
@@ -3204,6 +3426,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        int trap;
        bool is_power8;
 
+       if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+               return;
+
        /*
         * Remove from the list any threads that have a signal pending
         * or need a VPA update done
@@ -3231,9 +3456,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         * Make sure we are running on primary threads, and that secondary
         * threads are offline.  Also check if the number of threads in this
         * guest are greater than the current system threads per guest.
-        * On POWER9, we need to be not in independent-threads mode if
-        * this is a HPT guest on a radix host machine where the
-        * CPU threads may not be in different MMU modes.
         */
        if ((controlled_threads > 1) &&
            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3257,18 +3479,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        if (vc->num_threads < target_threads)
                collect_piggybacks(&core_info, target_threads);
 
-       /*
-        * On radix, arrange for TLB flushing if necessary.
-        * This has to be done before disabling interrupts since
-        * it uses smp_call_function().
-        */
-       pcpu = smp_processor_id();
-       if (kvm_is_radix(vc->kvm)) {
-               for (sub = 0; sub < core_info.n_subcores; ++sub)
-                       for_each_runnable_thread(i, vcpu, core_info.vc[sub])
-                               kvmppc_prepare_radix_vcpu(vcpu, pcpu);
-       }
-
        /*
         * Hard-disable interrupts, and check resched flag and signals.
         * If we need to reschedule or deliver a signal, clean up
@@ -3301,8 +3511,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        cmd_bit = stat_bit = 0;
        split = core_info.n_subcores;
        sip = NULL;
-       is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
-               && !cpu_has_feature(CPU_FTR_ARCH_300);
+       is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
 
        if (split > 1) {
                sip = &split_info;
@@ -3600,8 +3809,7 @@ static inline bool hcall_is_xics(unsigned long req)
 }
 
 /*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU.  The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
  */
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                         unsigned long lpcr)
@@ -3737,7 +3945,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                }
                kvmppc_xive_pull_vcpu(vcpu);
 
-               vcpu->arch.slb_max = 0;
+               if (kvm_is_radix(vcpu->kvm))
+                       vcpu->arch.slb_max = 0;
        }
 
        dec = mfspr(SPRN_DEC);
@@ -3883,7 +4092,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
        cur = start_poll = ktime_get();
        if (vc->halt_poll_ns) {
                ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
-               ++vc->runner->stat.halt_attempted_poll;
+               ++vc->runner->stat.generic.halt_attempted_poll;
 
                vc->vcore_state = VCORE_POLLING;
                spin_unlock(&vc->lock);
@@ -3894,13 +4103,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                                break;
                        }
                        cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
 
                spin_lock(&vc->lock);
                vc->vcore_state = VCORE_INACTIVE;
 
                if (!do_sleep) {
-                       ++vc->runner->stat.halt_successful_poll;
+                       ++vc->runner->stat.generic.halt_successful_poll;
                        goto out;
                }
        }
@@ -3912,7 +4121,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                do_sleep = 0;
                /* If we polled, count this as a successful poll */
                if (vc->halt_poll_ns)
-                       ++vc->runner->stat.halt_successful_poll;
+                       ++vc->runner->stat.generic.halt_successful_poll;
                goto out;
        }
 
@@ -3939,13 +4148,13 @@ out:
                        ktime_to_ns(cur) - ktime_to_ns(start_wait);
                /* Attribute failed poll time */
                if (vc->halt_poll_ns)
-                       vc->runner->stat.halt_poll_fail_ns +=
+                       vc->runner->stat.generic.halt_poll_fail_ns +=
                                ktime_to_ns(start_wait) -
                                ktime_to_ns(start_poll);
        } else {
                /* Attribute successful poll time */
                if (vc->halt_poll_ns)
-                       vc->runner->stat.halt_poll_success_ns +=
+                       vc->runner->stat.generic.halt_poll_success_ns +=
                                ktime_to_ns(cur) -
                                ktime_to_ns(start_poll);
        }
@@ -3972,7 +4181,6 @@ out:
 /*
  * This never fails for a radix guest, as none of the operations it does
  * for a radix guest can fail or have a way to report failure.
- * kvmhv_run_single_vcpu() relies on this fact.
  */
 static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
 {
@@ -4151,8 +4359,15 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        vc->runner = vcpu;
 
        /* See if the MMU is ready to go */
-       if (!kvm->arch.mmu_ready)
-               kvmhv_setup_mmu(vcpu);
+       if (!kvm->arch.mmu_ready) {
+               r = kvmhv_setup_mmu(vcpu);
+               if (r) {
+                       run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+                       run->fail_entry.hardware_entry_failure_reason = 0;
+                       vcpu->arch.ret = r;
+                       return r;
+               }
+       }
 
        if (need_resched())
                cond_resched();
@@ -4165,7 +4380,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        preempt_disable();
        pcpu = smp_processor_id();
        vc->pcpu = pcpu;
-       kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+       if (kvm_is_radix(kvm))
+               kvmppc_prepare_radix_vcpu(vcpu, pcpu);
 
        local_irq_disable();
        hard_irq_disable();
@@ -4365,7 +4581,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
-               if (kvm_is_radix(kvm))
+               if (cpu_has_feature(CPU_FTR_ARCH_300))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
@@ -5451,7 +5667,9 @@ static int kvmhv_enable_nested(struct kvm *kvm)
 {
        if (!nested)
                return -EPERM;
-       if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+       if (!cpu_has_feature(CPU_FTR_ARCH_300))
+               return -ENODEV;
+       if (!radix_enabled())
                return -ENODEV;
 
        /* kvm == NULL means the caller is testing if the capability exists */
@@ -5614,11 +5832,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
 
 static bool kvmppc_hash_v3_possible(void)
 {
-       if (radix_enabled() && no_mixing_hpt_and_radix)
+       if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return false;
 
-       return cpu_has_feature(CPU_FTR_ARCH_300) &&
-               cpu_has_feature(CPU_FTR_HVMODE);
+       if (!cpu_has_feature(CPU_FTR_HVMODE))
+               return false;
+
+       /*
+        * POWER9 chips before version 2.02 can't have some threads in
+        * HPT mode and some in radix mode on the same core.
+        */
+       if (radix_enabled()) {
+               unsigned int pvr = mfspr(SPRN_PVR);
+               if ((pvr >> 16) == PVR_POWER9 &&
+                   (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+                    ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+                       return false;
+       }
+
+       return true;
 }
 
 static struct kvmppc_ops kvm_ops_hv = {
@@ -5762,18 +5994,6 @@ static int kvmppc_book3s_init_hv(void)
        if (kvmppc_radix_possible())
                r = kvmppc_radix_init();
 
-       /*
-        * POWER9 chips before version 2.02 can't have some threads in
-        * HPT mode and some in radix mode on the same core.
-        */
-       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-               unsigned int pvr = mfspr(SPRN_PVR);
-               if ((pvr >> 16) == PVR_POWER9 &&
-                   (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
-                    ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
-                       no_mixing_hpt_and_radix = true;
-       }
-
        r = kvmppc_uvmem_init();
        if (r < 0)
                pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);