KVM: stats: Separate generic stats from architecture specific ones
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 662f599..cd544a4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
 #include <asm/kvm_book3s_uvmem.h>
 #include <asm/ultravisor.h>
 #include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
 
 #include "book3s.h"
 
@@ -130,9 +131,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
        return kvm->arch.nested_enable && kvm_is_radix(kvm);
 }
 
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -232,7 +230,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
        waitp = kvm_arch_vcpu_get_wait(vcpu);
        if (rcuwait_wake_up(waitp))
-               ++vcpu->stat.halt_wakeup;
+               ++vcpu->stat.generic.halt_wakeup;
 
        cpu = READ_ONCE(vcpu->arch.thread_cpu);
        if (cpu >= 0 && kvmppc_ipi_thread(cpu))
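
The stat.generic accesses above come from the new common container that this
series adds in include/linux/kvm_types.h; each architecture's struct
kvm_vcpu_stat now embeds it as its first member, named "generic". A sketch of
the upstream definition (field order hedged):

    struct kvm_vcpu_stat_generic {
            u64 halt_successful_poll;
            u64 halt_attempted_poll;
            u64 halt_poll_invalid;
            u64 halt_wakeup;
            u64 halt_poll_success_ns;
            u64 halt_poll_fail_ns;
    };

This is why every vcpu->stat.halt_* access in this file gains a .generic hop.
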
@@ -925,6 +923,68 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
        return yield_count;
 }
 
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+       unsigned long type = kvmppc_get_gpr(vcpu, 6);
+       unsigned long pid, pg_sizes, start, end;
+
+       /*
+        * Partition-scoped invalidations aren't handled here in L0.
+        */
+       if (type & H_RPTI_TYPE_NESTED)
+               return RESUME_HOST;
+
+       pid = kvmppc_get_gpr(vcpu, 4);
+       pg_sizes = kvmppc_get_gpr(vcpu, 7);
+       start = kvmppc_get_gpr(vcpu, 8);
+       end = kvmppc_get_gpr(vcpu, 9);
+
+       do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+                               type, pg_sizes, start, end);
+
+       kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+       return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+                                   unsigned long id, unsigned long target,
+                                   unsigned long type, unsigned long pg_sizes,
+                                   unsigned long start, unsigned long end)
+{
+       if (!kvm_is_radix(vcpu->kvm))
+               return H_UNSUPPORTED;
+
+       if (end < start)
+               return H_P5;
+
+       /*
+        * Partition-scoped invalidation for nested guests.
+        */
+       if (type & H_RPTI_TYPE_NESTED) {
+               if (!nesting_enabled(vcpu->kvm))
+                       return H_FUNCTION;
+
+               /* Support only cores as target */
+               if (target != H_RPTI_TARGET_CMMU)
+                       return H_P2;
+
+               return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+                                              start, end);
+       }
+
+       /*
+        * Process-scoped invalidation for L1 guests.
+        */
+       do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+                               type, pg_sizes, start, end);
+       return H_SUCCESS;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
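
The GPRs decoded above follow H_RPT_INVALIDATE's argument convention: R4
carries the PID (or LPID for nested requests), R5 the invalidation target,
R6 the H_RPTI_TYPE_* flags, R7 the page-size mask, and R8/R9 the start and
end of the range. The guest-side wrapper provided by asm/plpar_wrappers.h
(newly included at the top of this diff) is roughly:

    static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
                                              u64 page_sizes, u64 start, u64 end)
    {
            return plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
                                      page_sizes, start, end);
    }
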
@@ -1108,6 +1168,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
                        ret = H_HARDWARE;
                break;
+       case H_RPT_INVALIDATE:
+               ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+                                             kvmppc_get_gpr(vcpu, 5),
+                                             kvmppc_get_gpr(vcpu, 6),
+                                             kvmppc_get_gpr(vcpu, 7),
+                                             kvmppc_get_gpr(vcpu, 8),
+                                             kvmppc_get_gpr(vcpu, 9));
+               break;
 
        case H_SET_PARTITION_TABLE:
                ret = H_FUNCTION;
@@ -1228,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
        case H_XIRR_X:
 #endif
        case H_PAGE_INIT:
+       case H_RPT_INVALIDATE:
                return 1;
        }
 
@@ -1751,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
                if (!xics_on_xive())
                        kvmppc_xics_rm_complete(vcpu, 0);
                break;
+       case BOOK3S_INTERRUPT_SYSCALL:
+       {
+               unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+               /*
+                * The H_RPT_INVALIDATE hcalls issued by nested
+                * guests for process-scoped invalidations when
+                * GTSE=0 are handled here in L0.
+                */
+               if (req == H_RPT_INVALIDATE) {
+                       r = kvmppc_nested_h_rpt_invalidate(vcpu);
+                       break;
+               }
+
+               r = RESUME_HOST;
+               break;
+       }
        default:
                r = RESUME_HOST;
                break;
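
A nested (L2) guest running with GTSE=0 cannot issue tlbie itself, so its
kernel flushes process-scoped translations with H_RPT_INVALIDATE; the hcall
traps to L0 as a syscall interrupt, and the case above services it directly
rather than reflecting it to L1. A hedged sketch of how a guest flush path
might choose the hcall, using the wrapper shown earlier (flush_with_tlbie()
is a hypothetical stand-in for the direct-tlbie path):

    static void flush_pid_range(u32 pid, u64 pg_sizes, u64 start, u64 end)
    {
            if (!mmu_has_feature(MMU_FTR_GTSE))
                    /* GTSE=0: TLB invalidation must go through the hypervisor */
                    pseries_rpt_invalidate(pid, H_RPTI_TARGET_CMMU,
                                           H_RPTI_TYPE_TLB, pg_sizes,
                                           start, end);
            else
                    flush_with_tlbie(pid, start, end);      /* hypothetical */
    }
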
@@ -2823,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
        cpumask_t *cpu_in_guest;
        int i;
 
-       cpu = cpu_first_thread_sibling(cpu);
+       cpu = cpu_first_tlb_thread_sibling(cpu);
        if (nested) {
                cpumask_set_cpu(cpu, &nested->need_tlb_flush);
                cpu_in_guest = &nested->cpu_in_guest;
@@ -2837,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
         * the other side is the first smp_mb() in kvmppc_run_core().
         */
        smp_mb();
-       for (i = 0; i < threads_per_core; ++i)
-               if (cpumask_test_cpu(cpu + i, cpu_in_guest))
-                       smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+       for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+                                       i += cpu_tlb_thread_sibling_step())
+               if (cpumask_test_cpu(i, cpu_in_guest))
+                       smp_call_function_single(i, do_nothing, NULL, 1);
 }
 
 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
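
The rewritten loop matters on SMT8 POWER9/POWER10 "big core" parts, where an
SMT8 core is really two SMT4 half-cores with separate TLBs: keying the flush
to cpu_first_thread_sibling() could mark thread 0 while the vCPU actually
runs on an odd thread whose TLB was never flushed. A sketch of the sibling
helpers this hunk uses, as defined in asm/cputhreads.h (masks hedged to the
upstream version):

    static inline int cpu_first_tlb_thread_sibling(int cpu)
    {
            if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
                    return cpu & ~0x6;      /* big core: even/odd thread groups */
            else
                    return cpu_first_thread_sibling(cpu);
    }

    static inline int cpu_last_tlb_thread_sibling(int cpu)
    {
            if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
                    return cpu | 0x6;
            else
                    return cpu_last_thread_sibling(cpu);
    }

    static inline int cpu_tlb_thread_sibling_step(void)
    {
            if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
                    return 2;
            else
                    return 1;
    }
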
@@ -2870,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
         */
        if (prev_cpu != pcpu) {
                if (prev_cpu >= 0 &&
-                   cpu_first_thread_sibling(prev_cpu) !=
-                   cpu_first_thread_sibling(pcpu))
+                   cpu_first_tlb_thread_sibling(prev_cpu) !=
+                   cpu_first_tlb_thread_sibling(pcpu))
                        radix_flush_cpu(kvm, prev_cpu, vcpu);
                if (nested)
                        nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
@@ -3133,9 +3220,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
        for_each_runnable_thread(i, vcpu, vc) {
                if (signal_pending(vcpu->arch.run_task))
                        vcpu->arch.ret = -EINTR;
-               else if (no_mixing_hpt_and_radix &&
-                        kvm_is_radix(vc->kvm) != radix_enabled())
-                       vcpu->arch.ret = -EINVAL;
                else if (vcpu->arch.vpa.update_pending ||
                         vcpu->arch.slb_shadow.update_pending ||
                         vcpu->arch.dtl.update_pending)
@@ -3342,6 +3426,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        int trap;
        bool is_power8;
 
+       if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+               return;
+
        /*
         * Remove from the list any threads that have a signal pending
         * or need a VPA update done
@@ -3369,9 +3456,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         * Make sure we are running on primary threads, and that secondary
         * threads are offline.  Also check if the number of threads in this
         * guest is greater than the current system threads per guest.
-        * On POWER9, we need to be not in independent-threads mode if
-        * this is a HPT guest on a radix host machine where the
-        * CPU threads may not be in different MMU modes.
         */
        if ((controlled_threads > 1) &&
            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3395,18 +3479,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        if (vc->num_threads < target_threads)
                collect_piggybacks(&core_info, target_threads);
 
-       /*
-        * On radix, arrange for TLB flushing if necessary.
-        * This has to be done before disabling interrupts since
-        * it uses smp_call_function().
-        */
-       pcpu = smp_processor_id();
-       if (kvm_is_radix(vc->kvm)) {
-               for (sub = 0; sub < core_info.n_subcores; ++sub)
-                       for_each_runnable_thread(i, vcpu, core_info.vc[sub])
-                               kvmppc_prepare_radix_vcpu(vcpu, pcpu);
-       }
-
        /*
         * Hard-disable interrupts, and check resched flag and signals.
         * If we need to reschedule or deliver a signal, clean up
@@ -3439,8 +3511,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        cmd_bit = stat_bit = 0;
        split = core_info.n_subcores;
        sip = NULL;
-       is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
-               && !cpu_has_feature(CPU_FTR_ARCH_300);
+       is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
 
        if (split > 1) {
                sip = &split_info;
@@ -3738,8 +3809,7 @@ static inline bool hcall_is_xics(unsigned long req)
 }
 
 /*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU.  The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
  */
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                         unsigned long lpcr)
@@ -4022,7 +4092,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
        cur = start_poll = ktime_get();
        if (vc->halt_poll_ns) {
                ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
-               ++vc->runner->stat.halt_attempted_poll;
+               ++vc->runner->stat.generic.halt_attempted_poll;
 
                vc->vcore_state = VCORE_POLLING;
                spin_unlock(&vc->lock);
@@ -4033,13 +4103,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                                break;
                        }
                        cur = ktime_get();
-               } while (single_task_running() && ktime_before(cur, stop));
+               } while (kvm_vcpu_can_poll(cur, stop));
 
                spin_lock(&vc->lock);
                vc->vcore_state = VCORE_INACTIVE;
 
                if (!do_sleep) {
-                       ++vc->runner->stat.halt_successful_poll;
+                       ++vc->runner->stat.generic.halt_successful_poll;
                        goto out;
                }
        }
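
kvm_vcpu_can_poll() replaces the open-coded loop condition with the common
helper from include/linux/kvm_host.h, which additionally stops polling when
a reschedule is pending; roughly:

    static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
    {
            return single_task_running() && !need_resched() &&
                    ktime_before(cur, stop);
    }
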
@@ -4051,7 +4121,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
                do_sleep = 0;
                /* If we polled, count this as a successful poll */
                if (vc->halt_poll_ns)
-                       ++vc->runner->stat.halt_successful_poll;
+                       ++vc->runner->stat.generic.halt_successful_poll;
                goto out;
        }
 
@@ -4078,13 +4148,13 @@ out:
                        ktime_to_ns(cur) - ktime_to_ns(start_wait);
                /* Attribute failed poll time */
                if (vc->halt_poll_ns)
-                       vc->runner->stat.halt_poll_fail_ns +=
+                       vc->runner->stat.generic.halt_poll_fail_ns +=
                                ktime_to_ns(start_wait) -
                                ktime_to_ns(start_poll);
        } else {
                /* Attribute successful poll time */
                if (vc->halt_poll_ns)
-                       vc->runner->stat.halt_poll_success_ns +=
+                       vc->runner->stat.generic.halt_poll_success_ns +=
                                ktime_to_ns(cur) -
                                ktime_to_ns(start_poll);
        }
@@ -4511,7 +4581,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
-               if (radix_enabled())
+               if (cpu_has_feature(CPU_FTR_ARCH_300))
                        r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
                                                  vcpu->arch.vcore->lpcr);
                else
@@ -5599,6 +5669,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
                return -EPERM;
        if (!cpu_has_feature(CPU_FTR_ARCH_300))
                return -ENODEV;
+       if (!radix_enabled())
+               return -ENODEV;
 
        /* kvm == NULL means the caller is testing if the capability exists */
        if (kvm)
@@ -5760,11 +5832,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
 
 static bool kvmppc_hash_v3_possible(void)
 {
-       if (radix_enabled() && no_mixing_hpt_and_radix)
+       if (!cpu_has_feature(CPU_FTR_ARCH_300))
+               return false;
+
+       if (!cpu_has_feature(CPU_FTR_HVMODE))
                return false;
 
-       return cpu_has_feature(CPU_FTR_ARCH_300) &&
-               cpu_has_feature(CPU_FTR_HVMODE);
+       /*
+        * POWER9 chips before version 2.02 can't have some threads in
+        * HPT mode and some in radix mode on the same core.
+        */
+       if (radix_enabled()) {
+               unsigned int pvr = mfspr(SPRN_PVR);
+               if ((pvr >> 16) == PVR_POWER9 &&
+                   (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+                    ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+                       return false;
+       }
+
+       return true;
 }
 
 static struct kvmppc_ops kvm_ops_hv = {
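
This is the same PVR decode that previously set no_mixing_hpt_and_radix at
module init (removed below): the top 16 bits of the PVR select the processor
family, bits 0xe000 the POWER9 variant (0x0000 scale-out/"Nimbus", 0x2000
scale-up/"Cumulus"), and the low 12 bits the DD level. A worked example with
a hypothetical PVR value:

    /*
     * pvr = 0x004e0201:
     *   pvr >> 16    = 0x004e -> PVR_POWER9
     *   pvr & 0xe000 = 0x0000 -> Nimbus variant
     *   pvr & 0xfff  = 0x201  -> below 0x202 (i.e. pre DD2.02)
     * so with radix enabled, kvmppc_hash_v3_possible() returns false.
     */
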
@@ -5908,18 +5994,6 @@ static int kvmppc_book3s_init_hv(void)
        if (kvmppc_radix_possible())
                r = kvmppc_radix_init();
 
-       /*
-        * POWER9 chips before version 2.02 can't have some threads in
-        * HPT mode and some in radix mode on the same core.
-        */
-       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-               unsigned int pvr = mfspr(SPRN_PVR);
-               if ((pvr >> 16) == PVR_POWER9 &&
-                   (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
-                    ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
-                       no_mixing_hpt_and_radix = true;
-       }
-
        r = kvmppc_uvmem_init();
        if (r < 0)
                pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);