Merge branch 'kvm-fix-svm-races' into kvm-master
author    Paolo Bonzini <pbonzini@redhat.com>
          Wed, 31 Mar 2021 11:50:54 +0000 (07:50 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
          Thu, 1 Apr 2021 09:14:05 +0000 (05:14 -0400)
diff --cc arch/x86/kvm/svm/nested.c

@@@ -246,11 -225,17 +246,18 @@@ static bool nested_vmcb_check_controls(
        return true;
  }
  
- static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+ static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
  {
 +      struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
  
+       /*
+        * FIXME: these should be done after copying the fields,
+        * to avoid TOC/TOU races.  For these save area checks
+        * the possible damage is limited since kvm_set_cr0 and
+        * kvm_set_cr4 handle failure; EFER_SVME is an exception
+        * so it is force-set later in nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
  
  
        vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
  
 -      if (!vmcb12_lma) {
 -              if (vmcb12->save.cr4 & X86_CR4_PAE) {
 -                      if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
 -                              return false;
 -              } else {
 -                      if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
 -                              return false;
 -              }
 -      } else {
 +      if (vmcb12_lma) {
                if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
                    !(vmcb12->save.cr0 & X86_CR0_PE) ||
 -                  (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
 +                  kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
                        return false;
        }
 -      if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
 +      if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
  
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
  }
  
  static void load_nested_vmcb_control(struct vcpu_svm *svm,
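
The FIXME above describes a time-of-check/time-of-use window: vmcb12 points at guest-owned memory, so another vCPU can rewrite a field between the check here and its later use. The hunk also replaces the open-coded legacy/PAE CR3 reserved-bit masks with kvm_vcpu_is_illegal_gpa() and switches to the bool-returning kvm_is_valid_cr4(). A minimal userspace sketch of the hazard and of the copy-then-check pattern the series moves toward (struct save_area, check_then_use and copy_then_check are illustrative stand-ins, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Stand-in for the guest-writable VMCB12 save area. */
    struct save_area {
        uint64_t efer;
        uint64_t cr0;
        uint64_t cr3;
        uint64_t cr4;
    };

    #define EFER_SVME (1ULL << 12)    /* EFER.SVME is bit 12 */

    /* Racy: another vCPU can flip guest->efer right after the check. */
    static bool check_then_use(volatile struct save_area *guest)
    {
        if (!(guest->efer & EFER_SVME))    /* time of check */
            return false;
        /* ...guest->efer may already differ at time of use... */
        return true;
    }

    /* Safe: snapshot into private memory, then check and use the copy. */
    static bool copy_then_check(const volatile struct save_area *guest,
                                struct save_area *cache)
    {
        memcpy(cache, (const void *)guest, sizeof(*cache));
        return (cache->efer & EFER_SVME) != 0;
    }

As the comment notes, most save-area fields tolerate the race because kvm_set_cr0() and kvm_set_cr4() reject bad values at load time; EFER_SVME is the one field that needs special handling, below.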
@@@ -395,8 -388,15 +402,15 @@@ static void nested_prepare_vmcb_save(st
        svm->vmcb->save.ds = vmcb12->save.ds;
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
 -      kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
 +      kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
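
The comment above is the other half of the TOC/TOU story: nested_vmcb_check_save() verified EFER_SVME against guest memory, and the guest can clear the bit before the value is loaded here; per the comment, clearing EFER_SVME would call svm_free_nested and tear the nested state down. Force-setting the bit makes the loaded value independent of the race. A one-function sketch of the idea (EFER_SVME really is bit 12 of EFER; the helper name is hypothetical):

    #include <stdint.h>

    #define EFER_SVME (1ULL << 12)

    /*
     * The earlier check only proves SVME was set at some point in the
     * past; OR-ing it back in makes the value actually loaded correct
     * no matter what the guest wrote in the meantime.
     */
    static uint64_t effective_efer(uint64_t guest_efer)
    {
        return guest_efer | EFER_SVME;
    }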
@@@ -453,24 -453,9 +467,23 @@@ int enter_svm_guest_mode(struct vcpu_sv
  {
        int ret;
  
 +      trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
 +                             vmcb12->save.rip,
 +                             vmcb12->control.int_ctl,
 +                             vmcb12->control.event_inj,
 +                             vmcb12->control.nested_ctl);
 +
 +      trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
 +                                  vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
 +                                  vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
 +                                  vmcb12->control.intercepts[INTERCEPT_WORD3],
 +                                  vmcb12->control.intercepts[INTERCEPT_WORD4],
 +                                  vmcb12->control.intercepts[INTERCEPT_WORD5]);
 +
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
 -      nested_prepare_vmcb_save(svm, vmcb12);
        nested_prepare_vmcb_control(svm);
 +      nested_prepare_vmcb_save(svm, vmcb12);
  
        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
                                  nested_npt_enabled(svm));
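
Taken together, enter_svm_guest_mode() no longer caches the control area itself (the caller is expected to have copied it before running the checks, in line with the FIXME above) and now prepares the control state before the save area. A compressed sketch of the resulting VMRUN-emulation order, with hypothetical stubs standing in for the real helpers in arch/x86/kvm/svm/nested.c:

    #include <stdbool.h>

    /* Hypothetical stubs; each maps onto a helper in nested.c. */
    static void cache_controls(void)  { /* snapshot VMCB12 controls */ }
    static bool check_controls(void)  { return true; }
    static bool check_save(void)      { return true; }
    static void prepare_control(void) { }
    static void prepare_save(void)    { /* force-sets EFER_SVME */ }

    static int vmrun_emulation(void)
    {
        cache_controls();           /* caller: copy before any check */
        if (!check_controls() || !check_save())
            return -1;              /* real code injects #VMEXIT(INVALID) */
        prepare_control();          /* now runs before the save-area load */
        prepare_save();
        return 0;
    }

The tracepoints now at the top of the function fire before any nested state is committed, so a rejected VMRUN is still visible in the trace.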