Merge tag 'gfs2-v5.9-rc2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 232dd2f..d39d6cf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -820,22 +820,22 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                return 1;
 
-       if (cr0 & X86_CR0_PG) {
 #ifdef CONFIG_X86_64
-               if (!is_paging(vcpu) && (vcpu->arch.efer & EFER_LME)) {
-                       int cs_db, cs_l;
+       if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
+           (cr0 & X86_CR0_PG)) {
+               int cs_db, cs_l;
 
-                       if (!is_pae(vcpu))
-                               return 1;
-                       kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-                       if (cs_l)
-                               return 1;
-               } else
-#endif
-               if (is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
-                   !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
+               if (!is_pae(vcpu))
+                       return 1;
+               kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+               if (cs_l)
                        return 1;
        }
+#endif
+       if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
+           is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
+           !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
+               return 1;
 
        if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
                return 1;
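
Note: the hunk above flattens the old structure, where the long-mode entry test was nested inside an outer if (cr0 & X86_CR0_PG) block under #ifdef CONFIG_X86_64, into two independent checks. A minimal standalone sketch of what the first check enforces follows; struct vcpu_state, long_mode_entry_ok() and its fields are illustrative stand-ins, not KVM code. Setting CR0.PG while EFER.LME is set and paging is currently off means the guest is activating long mode, which architecturally requires CR4.PAE = 1 and CS.L = 0; the real kvm_set_cr0() returns 1 (leading to a #GP) when either condition fails.

	#include <stdbool.h>
	#include <stdio.h>

	/* Architectural bit positions (Intel SDM / AMD APM). */
	#define X86_CR0_PG	(1UL << 31)
	#define EFER_LME	(1UL << 8)

	/* Illustrative stand-in for the vcpu state the real helpers read. */
	struct vcpu_state {
		unsigned long cr0;
		unsigned long efer;
		bool pae;	/* CR4.PAE, what is_pae() reports in KVM */
		bool cs_l;	/* CS.L of the current code segment */
	};

	/* Setting CR0.PG with EFER.LME set while paging is off means the
	 * guest is entering long mode; that requires PAE paging and a
	 * non-64-bit code segment. */
	static bool long_mode_entry_ok(const struct vcpu_state *v,
				       unsigned long new_cr0)
	{
		bool entering = (v->efer & EFER_LME) &&
				!(v->cr0 & X86_CR0_PG) &&
				(new_cr0 & X86_CR0_PG);

		return !entering || (v->pae && !v->cs_l);
	}

	int main(void)
	{
		struct vcpu_state v = { .efer = EFER_LME, .pae = false };

		/* Prints 0: long-mode entry rejected because PAE is off. */
		printf("%d\n", long_mode_entry_ok(&v, X86_CR0_PG));
		return 0;
	}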
@@ -975,7 +975,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+                                  X86_CR4_SMEP;
 
        if (kvm_valid_cr4(vcpu, cr4))
                return 1;
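
Note: the change above narrows pdptr_bits so that toggling CR4.SMAP or CR4.PKE no longer forces a PDPTE reload; those bits affect permission checks, not how the PAE page-directory-pointer entries are located. A small compile-only sketch of the resulting predicate, with cr4_change_needs_pdptr_reload() as an illustrative name rather than a KVM function:

	/* CR4 bit positions (Intel SDM). */
	#define X86_CR4_PSE	(1UL << 4)
	#define X86_CR4_PAE	(1UL << 5)
	#define X86_CR4_PGE	(1UL << 7)
	#define X86_CR4_SMEP	(1UL << 20)

	/* PDPTEs only need to be re-read when a bit that influences the
	 * paging-structure walk flips; permission bits such as SMAP and
	 * PKE do not. */
	static inline int cr4_change_needs_pdptr_reload(unsigned long old_cr4,
							unsigned long new_cr4)
	{
		const unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
						 X86_CR4_PAE | X86_CR4_SMEP;

		return !!((old_cr4 ^ new_cr4) & pdptr_bits);
	}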
@@ -1116,14 +1116,12 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
                        vcpu->arch.eff_db[dr] = val;
                break;
        case 4:
-               /* fall through */
        case 6:
                if (!kvm_dr6_valid(val))
                        return -1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
                break;
        case 5:
-               /* fall through */
        default: /* 7 */
                if (!kvm_dr7_valid(val))
                        return -1; /* #GP */
@@ -1154,12 +1152,10 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
                *val = vcpu->arch.db[array_index_nospec(dr, size)];
                break;
        case 4:
-               /* fall through */
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
-               /* fall through */
        default: /* 7 */
                *val = vcpu->arch.dr7;
                break;
@@ -3051,7 +3047,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
-               pr = true; /* fall through */
+               pr = true;
+               fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
        case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
                if (kvm_pmu_is_valid_msr(vcpu, msr))
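
Note: this hunk and the two that follow replace /* fall through */ comments with the kernel's fallthrough pseudo-keyword (defined in include/linux/compiler_attributes.h), which compilers can verify via -Wimplicit-fallthrough; the DR hunks further up simply drop the comments because adjacent case labels with no statements between them need no annotation. A minimal standalone sketch of the pattern, with classify() and the MSR numbers invented for illustration:

	#include <stdio.h>

	/* Roughly how the kernel defines it; reproduced here so the
	 * example builds outside the tree. */
	#ifndef __has_attribute
	# define __has_attribute(x) 0
	#endif
	#if __has_attribute(__fallthrough__)
	# define fallthrough __attribute__((__fallthrough__))
	#else
	# define fallthrough do {} while (0)	/* fallthrough */
	#endif

	static int classify(int msr, int *pr)
	{
		switch (msr) {
		case 0:			/* a PERFCTR-style register */
			*pr = 1;
			fallthrough;	/* statements above: annotate */
		case 1:			/* EVNTSEL-style registers */
		case 2:			/* adjacent label: nothing needed */
			return 1;
		default:
			return 0;
		}
	}

	int main(void)
	{
		int pr = 0;
		int hit = classify(0, &pr);

		printf("%d %d\n", hit, pr);	/* prints "1 1" */
		return 0;
	}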
@@ -4359,7 +4356,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
        case KVM_CAP_HYPERV_SYNIC2:
                if (cap->args[0])
                        return -EINVAL;
-               /* fall through */
+               fallthrough;
 
        case KVM_CAP_HYPERV_SYNIC:
                if (!irqchip_in_kernel(vcpu->kvm))
@@ -8672,7 +8669,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                vcpu->arch.pv.pv_unhalted = false;
                vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
-               /* fall through */
+               fallthrough;
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.apf.halted = false;
                break;
@@ -10751,9 +10748,11 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 {
        struct x86_exception fault;
+       u32 access = error_code &
+               (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
        if (!(error_code & PFERR_PRESENT_MASK) ||
-           vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, error_code, &fault) != UNMAPPED_GVA) {
+           vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
                /*
                 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
                 * tables probably do not match the TLB.  Just proceed