From: Paolo Bonzini <pbonzini@redhat.com>
Date: Fri, 8 Apr 2022 16:43:40 +0000 (-0400)
Subject: Merge branch 'kvm-older-features' into HEAD
X-Git-Tag: microblaze-v5.20~139^2~83
X-Git-Url: http://git.monstr.eu/?a=commitdiff_plain;h=a4cfff3f0f8c07f1f7873a82bdeb3995807dac8c;p=linux-2.6-microblaze.git

Merge branch 'kvm-older-features' into HEAD

Merge branch for features that did not make it into 5.18:

* New ioctls to get/set TSC frequency for a whole VM

* Allow userspace to opt out of hypercall patching

Nested virtualization improvements for AMD:

* Support for "nested nested" optimizations (nested vVMLOAD/VMSAVE,
  nested vGIF)

* Allow AVIC to co-exist with a nested guest running

* Fixes for LBR virtualization when a nested guest is running,
  and nested LBR virtualization support

* PAUSE filtering for nested hypervisors

Guest support:

* Decoupling of vcpu_is_preempted from PV spinlocks

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---

a4cfff3f0f8c07f1f7873a82bdeb3995807dac8c
diff --cc arch/x86/kernel/kvm.c
index a22deb58f86d,774d924aeda8..d0bb2b3fb305
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@@ -752,6 -752,41 +752,42 @@@ static void kvm_crash_shutdown(struct p
  }
  #endif
  
+ #if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
+ bool __kvm_vcpu_is_preempted(long cpu);
+ 
+ __visible bool __kvm_vcpu_is_preempted(long cpu)
+ {
+ 	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+ 
+ 	return !!(src->preempted & KVM_VCPU_PREEMPTED);
+ }
+ PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+ 
+ #else
+ 
+ #include <asm/asm-offsets.h>
+ 
+ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
+ 
+ /*
+  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
+  * restoring to/from the stack.
+  */
+ asm(
+ ".pushsection .text;"
+ ".global __raw_callee_save___kvm_vcpu_is_preempted;"
+ ".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
+ "__raw_callee_save___kvm_vcpu_is_preempted:"
++ASM_ENDBR
+ "movq	__per_cpu_offset(,%rdi,8), %rax;"
+ "cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
+ "setne	%al;"
 -"ret;"
++ASM_RET
+ ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
+ ".popsection");
+ 
+ #endif
+ 
  static void __init kvm_guest_init(void)
  {
  	int i;
diff --cc arch/x86/kvm/x86.c
index 547ba00ef64f,7a066cf92692..10ad1029f69a
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@@ -3106,14 -3101,15 +3101,14 @@@ static int kvm_guest_time_update(struc
  
  	vcpu->hv_clock.flags = pvclock_flags;
  
- 	if (vcpu->pv_time_enabled)
- 		kvm_setup_pvclock_page(v, &vcpu->pv_time, 0);
- 	if (vcpu->xen.vcpu_info_set)
- 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_info_cache,
- 				       offsetof(struct compat_vcpu_info, time));
- 	if (vcpu->xen.vcpu_time_info_set)
- 		kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
+ 	if (vcpu->pv_time.active)
+ 		kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
+ 	if (vcpu->xen.vcpu_info_cache.active)
+ 		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
+ 					offsetof(struct compat_vcpu_info, time));
+ 	if (vcpu->xen.vcpu_time_info_cache.active)
+ 		kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
 -	if (!v->vcpu_idx)
 -		kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
 +	kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
  
  	return 0;
  }