KVM: Move x86's perf guest info callbacks to generic KVM
author Sean Christopherson <seanjc@google.com>
Thu, 11 Nov 2021 02:07:33 +0000 (02:07 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 17 Nov 2021 13:49:10 +0000 (14:49 +0100)
Move x86's perf guest callbacks into common KVM, as they are semantically
identical to arm64's callbacks (the only other such KVM callbacks).
arm64 will convert to the common versions in a future patch.

Implement the necessary arm64 arch hooks now so that arm64 does not need
stubs, or a temporary #define borrowed from x86, to compile when
CONFIG_GUEST_PERF_EVENTS=y.
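
As a rough sketch only (the actual conversion is left to the future patch
noted above), arm64's existing kvm_perf_init()/kvm_perf_teardown() could
then collapse into calls to the generic helpers introduced here, with no
Intel PT handler:

	/*
	 * Hypothetical arm64 follow-up, for illustration only: register the
	 * common callbacks and drop the arch-local perf code.  arm64 has no
	 * Intel PT interrupt handler, so it passes NULL.
	 */
	void kvm_perf_init(void)
	{
		kvm_register_perf_callbacks(NULL);
	}

	void kvm_perf_teardown(void)
	{
		kvm_unregister_perf_callbacks();
	}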

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211111020738.2512932-13-seanjc@google.com
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f680f30..aa28b8e 100644
@@ -678,6 +678,16 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 void kvm_perf_init(void);
 void kvm_perf_teardown(void);
 
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context.  For arm64, any event that arrives while a vCPU is
+ * loaded is considered to be "in guest".
+ */
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+       return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
+
 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2f03cbf..b400be9 100644
@@ -496,6 +496,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
        return vcpu_mode_priv(vcpu);
 }
 
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+       return *vcpu_pc(vcpu);
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 38f01b0..8957654 100644
@@ -1567,6 +1567,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
                return -ENOTSUPP;
 }
 
+#define kvm_arch_pmi_in_guest(vcpu) \
+       ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ab032ef..32cb6f9 100644
@@ -8469,43 +8469,12 @@ static void kvm_timer_init(void)
                          kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
-static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
-{
-       return vcpu && vcpu->arch.handling_intr_from_guest;
-}
-
-static unsigned int kvm_guest_state(void)
-{
-       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-       unsigned int state;
-
-       if (!kvm_pmi_in_guest(vcpu))
-               return 0;
-
-       state = PERF_GUEST_ACTIVE;
-       if (static_call(kvm_x86_get_cpl)(vcpu))
-               state |= PERF_GUEST_USER;
-
-       return state;
-}
-
-static unsigned long kvm_guest_get_ip(void)
-{
-       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
-       /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
-       if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
-               return 0;
-
-       return kvm_rip_read(vcpu);
-}
-
 static unsigned int kvm_handle_intel_pt_intr(void)
 {
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 
        /* '0' on failure so that the !PT case can use a RET0 static call. */
-       if (!kvm_pmi_in_guest(vcpu))
+       if (!kvm_arch_pmi_in_guest(vcpu))
                return 0;
 
        kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -8514,12 +8483,6 @@ static unsigned int kvm_handle_intel_pt_intr(void)
        return 1;
 }
 
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
-       .state                  = kvm_guest_state,
-       .get_ip                 = kvm_guest_get_ip,
-       .handle_intel_pt_intr   = NULL,
-};
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -11229,9 +11192,11 @@ int kvm_arch_hardware_setup(void *opaque)
        memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
        kvm_ops_static_call_update();
 
+       /* Temporary ugliness. */
        if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
-               kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
-       perf_register_guest_info_callbacks(&kvm_guest_cbs);
+               kvm_register_perf_callbacks(kvm_handle_intel_pt_intr);
+       else
+               kvm_register_perf_callbacks(NULL);
 
        if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
                supported_xss = 0;
@@ -11260,8 +11225,7 @@ int kvm_arch_hardware_setup(void *opaque)
 
 void kvm_arch_hardware_unsetup(void)
 {
-       perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-       kvm_guest_cbs.handle_intel_pt_intr = NULL;
+       kvm_unregister_perf_callbacks();
 
        static_call(kvm_x86_hardware_unsetup)();
 }
@@ -11852,6 +11816,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
        return vcpu->arch.preempted_in_kernel;
 }
 
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+       return kvm_rip_read(vcpu);
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9e0667e..9df7ab2 100644
@@ -1170,6 +1170,16 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
 }
 #endif
 
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d317245..76778dd 100644
@@ -5479,6 +5479,50 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
         return &kvm_running_vcpu;
 }
 
+#ifdef CONFIG_GUEST_PERF_EVENTS
+static unsigned int kvm_guest_state(void)
+{
+       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+       unsigned int state;
+
+       if (!kvm_arch_pmi_in_guest(vcpu))
+               return 0;
+
+       state = PERF_GUEST_ACTIVE;
+       if (!kvm_arch_vcpu_in_kernel(vcpu))
+               state |= PERF_GUEST_USER;
+
+       return state;
+}
+
+static unsigned long kvm_guest_get_ip(void)
+{
+       struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+       /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+       if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
+               return 0;
+
+       return kvm_arch_vcpu_get_ip(vcpu);
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+       .state                  = kvm_guest_state,
+       .get_ip                 = kvm_guest_get_ip,
+       .handle_intel_pt_intr   = NULL,
+};
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
+{
+       kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
+       perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+void kvm_unregister_perf_callbacks(void)
+{
+       perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+}
+#endif
+
 struct kvm_cpu_compat_check {
        void *opaque;
        int *ret;
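
For reference, a hedged sketch (not part of this patch) of how a perf-core
consumer might translate the registered ->state() callback into sample
attribution flags.  sample_misc_flags() is a hypothetical helper used purely
for illustration; PERF_GUEST_ACTIVE/PERF_GUEST_USER and the
PERF_RECORD_MISC_* values are existing perf definitions:

	/* Illustrative only: attribute a sample to host vs. guest user/kernel. */
	static u16 sample_misc_flags(struct perf_guest_info_callbacks *cbs,
				     struct pt_regs *regs)
	{
		unsigned int state = cbs->state();

		if (!(state & PERF_GUEST_ACTIVE))
			return user_mode(regs) ? PERF_RECORD_MISC_USER
					       : PERF_RECORD_MISC_KERNEL;

		return (state & PERF_GUEST_USER) ? PERF_RECORD_MISC_GUEST_USER
						 : PERF_RECORD_MISC_GUEST_KERNEL;
	}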