Merge branch 'kvm-fixes-for-5.18-rc5' into HEAD
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 07d789b..4790f0d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9111,7 +9111,7 @@ static void kvm_apicv_init(struct kvm *kvm)
 
        if (!enable_apicv)
                set_or_clear_apicv_inhibit(inhibits,
-                                          APICV_INHIBIT_REASON_ABSENT, true);
+                                          APICV_INHIBIT_REASON_DISABLE, true);
 }
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
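For context on the hunk above: set_or_clear_apicv_inhibit() flips one bit per
inhibit reason in the per-VM bitmap. A minimal sketch of the helper's shape as
it exists elsewhere in x86.c (not part of this diff; the upstream version may
also trace the change):

static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
				       enum kvm_apicv_inhibit reason, bool set)
{
	/* Each inhibit reason is one bit in the per-VM bitmap. */
	if (set)
		__set_bit(reason, inhibits);
	else
		__clear_bit(reason, inhibits);
}

The hunk corrects which bit gets set: when APICv is turned off by module
parameter, DISABLE is the accurate inhibit; ABSENT is meant for VMs that lack
an in-kernel local APIC.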
@@ -9889,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 }
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+       static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+}
+
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        if (!lapic_in_kernel(vcpu))
@@ -10099,7 +10104,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        /* Store vcpu->apicv_active before vcpu->mode.  */
        smp_store_release(&vcpu->mode, IN_GUEST_MODE);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        /*
         * 1) We should set ->mode before checking ->requests.  Please see
@@ -10130,7 +10135,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                r = 1;
                goto cancel_injection;
        }
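This and the following hunks replace open-coded srcu_read_lock()/
srcu_read_unlock() pairs with per-vCPU wrappers. A minimal sketch of the
wrappers, which the same series adds to include/linux/kvm_host.h (the upstream
versions additionally carry CONFIG_PROVE_RCU depth checks that warn on
unbalanced lock/unlock):

static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
	/* Stash the SRCU index so the matching unlock can find it. */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
}

Centralizing the srcu_idx bookkeeping is what lets later hunks drop the local
struct kvm pointers and the explicit vcpu->srcu_idx assignments.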
@@ -10256,7 +10261,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        local_irq_enable();
        preempt_enable();
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        /*
         * Profile KVM exit RIPs:
@@ -10286,7 +10291,7 @@ out:
 }
 
 /* Called within kvm->srcu read side.  */
-static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+static inline int vcpu_block(struct kvm_vcpu *vcpu)
 {
        bool hv_timer;
 
@@ -10302,12 +10307,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                if (hv_timer)
                        kvm_lapic_switch_to_sw_timer(vcpu);
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                        kvm_vcpu_halt(vcpu);
                else
                        kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (hv_timer)
                        kvm_lapic_switch_to_hv_timer(vcpu);
@@ -10349,7 +10354,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
-       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.l1tf_flush_l1d = true;
 
@@ -10357,7 +10361,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {
-                       r = vcpu_block(kvm, vcpu);
+                       r = vcpu_block(vcpu);
                }
 
                if (r <= 0)
@@ -10376,9 +10380,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                }
 
                if (__xfer_to_guest_mode_work_pending()) {
-                       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        r = xfer_to_guest_mode_handle_work(vcpu);
-                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+                       kvm_vcpu_srcu_read_lock(vcpu);
                        if (r)
                                return r;
                }
@@ -10389,12 +10393,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 
 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
-       int r;
-
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       return r;
+       return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 }
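The simplification above is safe because kvm_arch_vcpu_ioctl_run() now takes
the vCPU's SRCU read lock once and holds it across the entire run, including
completion callbacks, so re-acquiring it here would be redundant (and would
trip the wrappers' depth checks). An abridged sketch of how the callback is
reached from the ioctl path:

	/* In kvm_arch_vcpu_ioctl_run(), with SRCU already held: */
	if (unlikely(vcpu->arch.complete_userspace_io)) {
		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;

		vcpu->arch.complete_userspace_io = NULL;
		r = cui(vcpu);	/* e.g. complete_emulated_io() */
		if (r <= 0)
			goto out;
	}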
 
 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
@@ -10486,7 +10485,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
-       struct kvm *kvm = vcpu->kvm;
        int r;
 
        vcpu_load(vcpu);
@@ -10494,7 +10492,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        kvm_run->flags = 0;
        kvm_load_guest_fpu(vcpu);
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -10506,9 +10504,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
@@ -10569,7 +10567,7 @@ out:
        if (kvm_run->kvm_valid_regs)
                store_regs(vcpu);
        post_kvm_run_save(vcpu);
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);
@@ -10987,6 +10985,9 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       if (!enable_apicv)
+               return;
+
        down_write(&kvm->arch.apicv_update_lock);
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -11198,8 +11199,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
-               if (kvm_apicv_activated(vcpu->kvm))
+
+               /*
+                * Defer evaluating inhibits until the vCPU is first run, as
+                * this vCPU will not get notified of any changes until this
+                * vCPU is visible to other vCPUs (marked online and added to
+                * the set of vCPUs).  Opportunistically mark APICv active as
+                * VMX in particular is highly unlikely to have inhibits.
+                * Ignore the current per-VM APICv state so that vCPU creation
+                * is guaranteed to run with a deterministic value; the request
+                * will ensure the vCPU gets the correct state before VM-Entry.
+                */
+               if (enable_apicv) {
                        vcpu->arch.apicv_active = true;
+                       kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+               }
        } else
                static_branch_inc(&kvm_has_noapic_vcpu);
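For context on the deferred update: the pending KVM_REQ_APICV_UPDATE is
consumed on the vCPU's first VM-Entry, reconciling the optimistically-set
apicv_active with the per-VM inhibits under the update lock. An abridged
sketch of the consuming side (request check as in vcpu_enter_guest();
kvm_vcpu_update_apicv() with vendor hooks elided):

	/* In vcpu_enter_guest()'s request-processing block: */
	if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
		kvm_vcpu_update_apicv(vcpu);

void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
	bool activate;

	if (!lapic_in_kernel(vcpu))
		return;

	down_read(&vcpu->kvm->arch.apicv_update_lock);

	activate = kvm_apicv_activated(vcpu->kvm);
	if (vcpu->arch.apicv_active != activate) {
		vcpu->arch.apicv_active = activate;
		kvm_apic_update_apicv(vcpu);
		/* ... vendor refresh_apicv_exec_ctrl() etc. elided ... */
	}

	up_read(&vcpu->kvm->arch.apicv_update_lock);
}

This is why vCPU creation can ignore the current per-VM state: whatever value
it starts with, the request guarantees a fresh evaluation before first entry.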