KVM: SVM: keep DR6 synchronized with vcpu->arch.dr6
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 4 May 2020 15:28:25 +0000 (11:28 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 8 May 2020 11:43:47 +0000 (07:43 -0400)
kvm_x86_ops.set_dr6 is only ever called with vcpu->arch.dr6 as the
second argument.  Ensure that the VMCB value is synchronized to
vcpu->arch.dr6 on #DB (both "normal" and nested) and nested vmentry, so
that the current value of DR6 is always available in vcpu->arch.dr6.
The get_dr6 callback can just access vcpu->arch.dr6 and becomes redundant.
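As a reading aid, the following is a minimal userspace model of the invariant this
establishes; the names (vcpu, vmcb_save, update_hw_dr6, sync_dr6_on_db, get_dr6) are
illustrative stand-ins rather than the KVM symbols, and only the direction of the copies
mirrors the real code: vcpu->arch.dr6 is the single source of truth, the VMCB value is
copied back into it when a #DB is intercepted, and it is copied into the VMCB on (nested)
vmentry, so reads no longer need a vendor callback.

/* Illustrative sketch only, not kernel code. */
#include <stdio.h>

#define DR6_BS (1UL << 14)	/* single-step trap bit, as in the real DR6 */

struct vmcb_save { unsigned long dr6; };	/* stands in for vmcb->save */
struct vcpu { unsigned long dr6; };		/* stands in for vcpu->arch */

/* vmentry / kvm_update_dr6(): architectural state flows into the VMCB */
static void update_hw_dr6(struct vcpu *vcpu, struct vmcb_save *save)
{
	save->dr6 = vcpu->dr6;
}

/* #DB interception: whatever the hardware latched flows back into arch.dr6 */
static void sync_dr6_on_db(struct vcpu *vcpu, const struct vmcb_save *save)
{
	vcpu->dr6 = save->dr6;
}

/* reading DR6: after this change it is just a read of arch.dr6 */
static unsigned long get_dr6(const struct vcpu *vcpu)
{
	return vcpu->dr6;
}

int main(void)
{
	struct vcpu vcpu = { .dr6 = 0 };
	struct vmcb_save save = { .dr6 = 0 };

	update_hw_dr6(&vcpu, &save);	/* vmentry: push arch.dr6 into the VMCB */
	save.dr6 |= DR6_BS;		/* hardware reports a single-step #DB */
	sync_dr6_on_db(&vcpu, &save);	/* interception: pull it back */

	printf("dr6 = %#lx\n", get_dr6(&vcpu));	/* prints dr6 = 0x4000 */
	return 0;
}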

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8c247bc..de0c288 100644
@@ -1093,7 +1093,6 @@ struct kvm_x86_ops {
        void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
-       u64 (*get_dr6)(struct kvm_vcpu *vcpu);
        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
        void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
        void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
@@ -1624,6 +1623,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 
 void kvm_define_shared_msr(unsigned index, u32 msr);
 int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
+void kvm_update_dr6(struct kvm_vcpu *vcpu);
 
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index adab5b1..9cfa809 100644
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 
 #include <asm/msr-index.h>
+#include <asm/debugreg.h>
 
 #include "kvm_emulate.h"
 #include "trace.h"
@@ -267,7 +268,8 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->vmcb->save.rsp = nested_vmcb->save.rsp;
        svm->vmcb->save.rip = nested_vmcb->save.rip;
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
-       svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+       svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
+       kvm_update_dr6(&svm->vcpu);
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
        svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +484,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        nested_vmcb->save.rsp    = vmcb->save.rsp;
        nested_vmcb->save.rax    = vmcb->save.rax;
        nested_vmcb->save.dr7    = vmcb->save.dr7;
-       nested_vmcb->save.dr6    = vmcb->save.dr6;
+       nested_vmcb->save.dr6    = svm->vcpu.arch.dr6;
        nested_vmcb->save.cpl    = vmcb->save.cpl;
 
        nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
@@ -606,7 +608,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 /* DB exceptions for our internal use must not cause vmexit */
 static int nested_svm_intercept_db(struct vcpu_svm *svm)
 {
-       unsigned long dr6;
+       unsigned long dr6 = svm->vmcb->save.dr6;
 
        /* Always catch it and pass it to userspace if debugging.  */
        if (svm->vcpu.guest_debug &
@@ -615,22 +617,28 @@ static int nested_svm_intercept_db(struct vcpu_svm *svm)
 
        /* if we're not singlestepping, it's not ours */
        if (!svm->nmi_singlestep)
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
 
        /* if it's not a singlestep exception, it's not ours */
-       if (kvm_get_dr(&svm->vcpu, 6, &dr6))
-               return NESTED_EXIT_DONE;
        if (!(dr6 & DR6_BS))
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
 
        /* if the guest is singlestepping, it should get the vmexit */
        if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
                disable_nmi_singlestep(svm);
-               return NESTED_EXIT_DONE;
+               goto reflected_db;
        }
 
        /* it's ours, the nested hypervisor must not see this one */
        return NESTED_EXIT_HOST;
+
+reflected_db:
+       /*
+        * Synchronize guest DR6 here just like in db_interception; it will
+        * be moved into the nested VMCB by nested_svm_vmexit.
+        */
+       svm->vcpu.arch.dr6 = dr6;
+       return NESTED_EXIT_DONE;
 }
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 38f6aee..f03bffa 100644
@@ -1672,11 +1672,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
        mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
-static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
-{
-       return to_svm(vcpu)->vmcb->save.dr6;
-}
-
 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1693,7 +1688,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
        get_debugreg(vcpu->arch.db[1], 1);
        get_debugreg(vcpu->arch.db[2], 2);
        get_debugreg(vcpu->arch.db[3], 3);
-       vcpu->arch.dr6 = svm_get_dr6(vcpu);
+       vcpu->arch.dr6 = svm->vmcb->save.dr6;
        vcpu->arch.dr7 = svm->vmcb->save.dr7;
 
        vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
@@ -1739,6 +1734,7 @@ static int db_interception(struct vcpu_svm *svm)
        if (!(svm->vcpu.guest_debug &
              (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                !svm->nmi_singlestep) {
+               vcpu->arch.dr6 = svm->vmcb->save.dr6;
                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
                return 1;
        }
@@ -3931,7 +3927,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .set_idt = svm_set_idt,
        .get_gdt = svm_get_gdt,
        .set_gdt = svm_set_gdt,
-       .get_dr6 = svm_get_dr6,
        .set_dr6 = svm_set_dr6,
        .set_dr7 = svm_set_dr7,
        .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2384a2d..6153a47 100644
@@ -4965,11 +4965,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
        return kvm_skip_emulated_instruction(vcpu);
 }
 
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.dr6;
-}
-
 static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
 {
 }
@@ -7736,7 +7731,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
-       .get_dr6 = vmx_get_dr6,
        .set_dr6 = vmx_set_dr6,
        .set_dr7 = vmx_set_dr7,
        .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f762855..4ea6448 100644
@@ -104,7 +104,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
-static void kvm_update_dr6(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -1048,7 +1047,7 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
        }
 }
 
-static void kvm_update_dr6(struct kvm_vcpu *vcpu)
+void kvm_update_dr6(struct kvm_vcpu *vcpu)
 {
        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
@@ -1129,10 +1128,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
        case 4:
                /* fall through */
        case 6:
-               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                       *val = vcpu->arch.dr6;
-               else
-                       *val = kvm_x86_ops.get_dr6(vcpu);
+               *val = vcpu->arch.dr6;
                break;
        case 5:
                /* fall through */