Merge branch 'kvm-fixes' into 'next'
author     Paolo Bonzini <pbonzini@redhat.com>
           Wed, 21 Oct 2020 22:05:58 +0000 (18:05 -0400)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 21 Oct 2020 22:05:58 +0000 (18:05 -0400)
Pick up bugfixes from 5.9, otherwise various tests fail.

arch/arm64/kvm/hyp/nvhe/tlb.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c

Simple merge
Simple merge
@@@ -5343,7 -5337,7 +5349,7 @@@ static int handle_ept_violation(struct 
         * would also use advanced VM-exit information for EPT violations to
         * reconstruct the page fault error code.
         */
-       if (unlikely(kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
 -      if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa)))
++      if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
                return kvm_emulate_instruction(vcpu, 0);
  
        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
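
The resolution above keeps both sides of the conflict: the rename to kvm_vcpu_is_illegal_gpa() from 'next' and the allow_smaller_maxphyaddr guard from the kvm-fixes side. A rough standalone sketch of the legality test being gated on (names and details here are illustrative, not the in-tree helper):

/*
 * Illustrative sketch only (not the in-tree kvm_vcpu_is_illegal_gpa()):
 * a GPA is illegal for the guest when any bit at or above the guest's
 * advertised MAXPHYADDR is set.  With allow_smaller_maxphyaddr, EPT
 * cannot fault on such an access by itself, so KVM emulates the
 * instruction to inject the #PF the guest expects.
 */
#include <stdbool.h>
#include <stdint.h>

bool gpa_is_illegal(uint64_t gpa, unsigned int guest_maxphyaddr)
{
        /* Bits the guest may legitimately use in a physical address. */
        uint64_t legal_mask = (guest_maxphyaddr >= 64) ?
                              ~0ULL : ((1ULL << guest_maxphyaddr) - 1);

        return (gpa & ~legal_mask) != 0;
}
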
@@@ -470,22 -552,12 +470,25 @@@ static inline bool vmx_has_waitpkg(stru
  
  static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
  {
-       return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
+       if (!enable_ept)
+               return true;
+       return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
  }
  
 +static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
 +{
 +      return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
 +          (secondary_exec_controls_get(to_vmx(vcpu)) &
 +          SECONDARY_EXEC_UNRESTRICTED_GUEST));
 +}
 +
 +bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
 +static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
 +{
 +      return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
 +}
 +
  void dump_vmcs(void);
  
  #endif /* __KVM_X86_VMX_H */
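
The resolved vmx_need_pf_intercept() keeps the allow_smaller_maxphyaddr guard from the kvm-fixes side in place of the unconditional maxphyaddr check from 'next', while splitting out the !enable_ept case. A minimal restatement with the KVM globals turned into explicit parameters (names here are hypothetical, for illustration only):

/*
 * Sketch of the merged decision: #PF must be intercepted when shadow
 * paging is in use, or when "smaller guest MAXPHYADDR" emulation is
 * enabled and the guest advertises fewer physical address bits than
 * the host supports.
 */
#include <stdbool.h>

bool need_pf_intercept(bool enable_ept, bool allow_smaller_maxphyaddr,
                       int guest_maxphyaddr, int host_phys_bits)
{
        if (!enable_ept)
                return true;    /* shadow paging always intercepts #PF */

        return allow_smaller_maxphyaddr &&
               guest_maxphyaddr < host_phys_bits;
}
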
@@@ -3400,9 -3231,9 +3401,9 @@@ int kvm_get_msr_common(struct kvm_vcpu 
                 * even when not intercepted. AMD manual doesn't explicitly
                 * state this but appears to behave the same.
                 *
-                * Unconditionally return L1's TSC offset on userspace reads
-                * so that userspace reads and writes always operate on L1's
-                * offset, e.g. to ensure deterministic behavior for migration.
+                * On userspace reads and writes, however, we unconditionally
 -               * operate L1's TSC value to ensure backwards-compatible
++               * return L1's TSC value to ensure backwards-compatible
+                * behavior for migration.
                 */
                u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
                                                            vcpu->arch.tsc_offset;
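
The resolution keeps the kvm-fixes comment wording but corrects "operate" to "return"; the behavior it documents is the host_initiated selection in the line above. A simplified standalone sketch of that selection (hypothetical names and types, TSC scaling omitted):

/*
 * Host-initiated (userspace) MSR reads always see L1's TSC offset so
 * that migration is deterministic; guest reads use whatever offset is
 * currently active, which may belong to a nested (L2) guest.
 */
#include <stdbool.h>
#include <stdint.h>

struct tsc_state {
        uint64_t l1_tsc_offset;   /* offset programmed for L1 */
        uint64_t tsc_offset;      /* offset active right now (L1 or L2) */
};

uint64_t read_guest_tsc(const struct tsc_state *s, uint64_t host_tsc,
                        bool host_initiated)
{
        uint64_t offset = host_initiated ? s->l1_tsc_offset : s->tsc_offset;

        return host_tsc + offset;
}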