// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);
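
/*
 * apf_reason and steal_time below are written by the host through the
 * guest physical addresses registered in the respective MSRs, so under
 * SEV they must live in decrypted (shared) memory. The 64-byte alignment
 * keeps each structure within a single cache line.
 */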
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
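
/*
 * Tasks suspended by an async #PF "page not present" event sleep on a
 * per-token node kept in a small hash table keyed by hash_32(token),
 * until the host reports the page as ready.
 */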
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *e;

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* A dummy entry exists -> the wakeup was delivered ahead of the #PF */
                hlist_del(&e->link);
                raw_spin_unlock(&b->lock);
                kfree(e);
                return false;
        }

        n->token = token;
        n->cpu = smp_processor_id();
        init_swait_queue_head(&n->wq);
        hlist_add_head(&n->link, &b->list);
        raw_spin_unlock(&b->lock);
        return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
        struct kvm_task_sleep_node n;
        DECLARE_SWAITQUEUE(wait);

        lockdep_assert_irqs_disabled();

        if (!kvm_async_pf_queue_task(token, &n))
                return;

        for (;;) {
                prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                local_irq_enable();
                schedule();
                local_irq_disable();
        }
        finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (swq_has_sleeper(&n->wq))
                swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                struct kvm_task_sleep_node *n;
                struct hlist_node *p, *next;

                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        n = hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}
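
/*
 * Wake the task sleeping on @token. A token of ~0 broadcasts to all
 * sleepers on this CPU. If the wakeup arrives before the corresponding
 * sleeper, a dummy node is queued so kvm_async_pf_queue_task() can detect
 * the already-delivered wakeup and return without sleeping.
 */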
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * Async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else {
                apf_task_wake_one(n);
        }
        raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
        u32 flags = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                flags = __this_cpu_read(apf_reason.flags);
                __this_cpu_write(apf_reason.flags, 0);
        }

        return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
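
/*
 * Handle an async #PF delivered through the page fault vector. Returns
 * true if this was an async page fault and it has been consumed here,
 * false if the caller should continue with regular #PF handling.
 */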
noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
        u32 flags = kvm_read_and_reset_apf_flags();
        irqentry_state_t state;

        if (!flags)
                return false;

        state = irqentry_enter(regs);
        instrumentation_begin();

        /*
         * If the host managed to inject an async #PF into an interrupt
         * disabled region, then die hard as this is not going to end well
         * and the host side is seriously broken.
         */
        if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
                panic("Host injected async #PF in interrupt disabled region\n");

        if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
                if (unlikely(!(user_mode(regs))))
                        panic("Host injected async #PF in kernel mode\n");
                /* Page is swapped out by the host. */
                kvm_async_pf_task_wait_schedule(token);
        } else {
                WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
        }

        instrumentation_end();
        irqentry_exit(regs, state);
        return true;
}
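
/*
 * "Page ready" notifications arrive on HYPERVISOR_CALLBACK_VECTOR as a
 * regular interrupt. The token is picked up from apf_reason and must be
 * acknowledged through MSR_KVM_ASYNC_PF_ACK so the host can deliver the
 * next pending notification.
 */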
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        u32 token;

        ack_APIC_irq();

        inc_irq_stat(irq_hv_callback_count);

        if (__this_cpu_read(apf_reason.enabled)) {
                token = __this_cpu_read(apf_reason.token);
                kvm_async_pf_task_wake(token);
                __this_cpu_write(apf_reason.token, 0);
                wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
        }

        set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
                 (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for locks or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
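
/*
 * Per-CPU PV feature setup: each feature is enabled by writing the guest
 * physical address of its shared data area, plus an enable bit, into the
 * corresponding MSR. This runs on every CPU, including CPUs that are
 * hotplugged later.
 */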
static void kvm_guest_cpu_init(void)
{
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
                u64 pa;

                WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

                pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
                pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

                if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
                        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

                wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                pr_debug("setup async PF for cpu %d\n", smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;

                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
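        /*
         * steal_time.version works like a seqcount: the host bumps it to an
         * odd value before updating the record and to an even value after,
         * so retry while it is odd or changed across the reads.
         */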
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
        early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
        int cpu;

        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;

        for_each_possible_cpu(cpu) {
                __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
                __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
                __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
        }
}
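
/*
 * Tear down every registered PV feature so the host stops writing into
 * guest memory. Used on the CPU unplug, suspend, reboot and kexec paths.
 */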
static void kvm_guest_cpu_offline(bool shutdown)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
                wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
        kvm_pv_disable_apf();
        if (!shutdown)
                apf_task_wake_all();
        kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        kvm_guest_cpu_init();
        local_irq_restore(flags);
        return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
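
/*
 * The PV flush/IPI/yield helpers pay off only when vCPUs can actually be
 * preempted: they depend on steal time for the preempted bit and are
 * pointless with a single CPU or with the KVM_HINTS_REALTIME hint
 * (dedicated pCPUs); exposing MWAIT to the guest typically indicates
 * dedicated pCPUs as well.
 */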
static bool pv_tlb_flush_supported(void)
{
        return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
                !boot_cpu_has(X86_FEATURE_MWAIT) &&
                (num_possible_cpus() != 1));
}

static bool pv_ipi_supported(void)
{
        return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
                (num_possible_cpus() != 1));
}

static bool pv_sched_yield_supported(void)
{
        return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
                !boot_cpu_has(X86_FEATURE_MWAIT) &&
                (num_possible_cpus() != 1));
}

#define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)
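
/*
 * KVM_HC_SEND_IPI takes a 128-bit destination bitmap (passed as two
 * longs) plus the lowest APIC ID the bitmap is based at, so a single
 * hypercall can target a cluster of up to 2 * BITS_PER_LONG consecutive
 * APIC IDs. The loop below flushes the current cluster and starts a new
 * one whenever an APIC ID falls outside the window.
 */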
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
        unsigned long flags;
        int cpu, apic_id, icr;
        int min = 0, max = 0;
#ifdef CONFIG_X86_64
        __uint128_t ipi_bitmap = 0;
#else
        u64 ipi_bitmap = 0;
#endif
        long ret;

        if (cpumask_empty(mask))
                return;

        local_irq_save(flags);

        switch (vector) {
        default:
                icr = APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr = APIC_DM_NMI;
                break;
        }

        for_each_cpu(cpu, mask) {
                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
                if (!ipi_bitmap) {
                        min = max = apic_id;
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;
                        min = apic_id;
                } else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                        WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
                                  ret);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
                __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
        }

        if (ipi_bitmap) {
                ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                        (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
                          ret);
        }

        local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
        __send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
        const struct cpumask *local_mask;

        cpumask_copy(new_mask, mask);
        cpumask_clear_cpu(this_cpu, new_mask);
        local_mask = new_mask;
        __send_ipi_mask(local_mask, vector);
}

static int __init setup_efi_kvm_sev_migration(void)
{
        efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
        efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
        efi_status_t status;
        unsigned long size;
        bool enabled;

        if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
            !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
                return 0;

        if (!efi_enabled(EFI_BOOT))
                return 0;

        if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
                pr_info("%s : EFI runtime services are not enabled\n", __func__);
                return 0;
        }

        size = sizeof(enabled);

        /* Get variable contents into buffer */
        status = efi.get_variable(efi_sev_live_migration_enabled,
                                  &efi_variable_guid, NULL, &size, &enabled);

        if (status == EFI_NOT_FOUND) {
                pr_info("%s : EFI live migration variable not found\n", __func__);
                return 0;
        }

        if (status != EFI_SUCCESS) {
                pr_info("%s : EFI variable retrieval failed\n", __func__);
                return 0;
        }

        if (enabled == 0) {
                pr_info("%s: live migration disabled in EFI\n", __func__);
                return 0;
        }

        pr_info("%s : live migration enabled in EFI\n", __func__);
        wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);

        return 1;
}
late_initcall(setup_efi_kvm_sev_migration);

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
        apic->send_IPI_mask = kvm_send_ipi_mask;
        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
        pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
        int cpu;

        native_send_call_func_ipi(mask);

        /* Make sure other vCPUs get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
                        kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
                        break;
                }
        }
}
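
/*
 * PV TLB flush: instead of IPIing a preempted vCPU, set
 * KVM_VCPU_FLUSH_TLB in its steal_time.preempted word so the host
 * flushes the TLB when that vCPU next enters guest mode, and drop it
 * from the IPI mask. If the cmpxchg loses a race with the vCPU being
 * scheduled back in, the CPU simply stays in the mask and gets a real
 * flush IPI.
 */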
static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

        cpumask_copy(flushmask, cpumask);
        /*
         * We have to call flush only on online vCPUs, and
         * queue flush_on_enter for pre-empted vCPUs.
         */
        for_each_cpu(cpu, flushmask) {
                /*
                 * The local vCPU is never preempted, so we do not explicitly
                 * skip the check for the local vCPU - it will never be
                 * cleared from flushmask.
                 */
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if ((state & KVM_VCPU_PREEMPTED)) {
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
        int cpu;

        if (!kvm_para_available() || nopv)
                return 0;

        if (pv_tlb_flush_supported() || pv_ipi_supported())
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
                }

        return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
        /*
         * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
         * shares the guest physical address with the hypervisor.
         */
        sev_map_percpu_data();

        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        kvm_guest_cpu_offline(false);
        local_irq_restore(flags);
        return 0;
}

#endif /* CONFIG_SMP */

static int kvm_suspend(void)
{
        kvm_guest_cpu_offline(false);

        return 0;
}

static void kvm_resume(void)
{
        kvm_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops kvm_syscore_ops = {
        .suspend        = kvm_suspend,
        .resume         = kvm_resume,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shut down, this memory
 * won't be valid. In cases like kexec, in which you install a new kernel,
 * this means the host would keep writing to a random memory location, so all
 * PV features are disabled on these paths.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
        kvm_guest_cpu_offline(true);
        native_machine_crash_shutdown(regs);
}
#endif

static void __init kvm_guest_init(void)
{
        int i;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                static_call_update(pv_steal_clock, kvm_steal_clock);
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
                static_branch_enable(&kvm_async_pf_enabled);
                alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
        }

#ifdef CONFIG_SMP
        if (pv_tlb_flush_supported()) {
                pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
                pr_info("KVM setup pv remote TLB flush\n");
        }

        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (pv_sched_yield_supported()) {
                smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
                pr_info("setup PV sched yield\n");
        }
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("failed to install cpu hotplug callbacks\n");
#else
        sev_map_percpu_data();
        kvm_guest_cpu_init();
#endif

#ifdef CONFIG_KEXEC_CORE
        machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

        register_syscore_ops(&kvm_syscore_ops);

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base(KVM_SIGNATURE, 0);

        return 0;
}
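
/*
 * Hypervisors advertise themselves in the 0x40000000 CPUID range;
 * hypervisor_cpuid_base() scans that range for the KVM signature.
 * Cache the result below, as it is queried on several paths.
 */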
static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
        return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
        if (pv_ipi_supported())
                kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
        return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}
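
/*
 * Tell the host whether a GPA range is currently mapped encrypted or
 * decrypted, so it can track which pages are shared. SEV live migration
 * relies on this shared pages list.
 */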
static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{
        kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
                           KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

static void __init kvm_init_platform(void)
{
        if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
            kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
                unsigned long nr_pages;
                int i;

                pv_ops.mmu.notify_page_enc_status_changed =
                        kvm_sev_hc_page_enc_status;

                /*
                 * Reset the host's shared pages list related to kernel
                 * specific page encryption status settings before we load a
                 * new kernel by kexec. Reset the page encryption status
                 * during early boot instead of just before kexec to avoid SMP
                 * races during kvm_pv_guest_cpu_reboot().
                 * NOTE: We cannot reset the complete shared pages list
                 * here as we need to retain the UEFI/OVMF firmware
                 * specific settings.
                 */
                for (i = 0; i < e820_table->nr_entries; i++) {
                        struct e820_entry *entry = &e820_table->entries[i];

                        if (entry->type != E820_TYPE_RAM)
                                continue;

                        nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

                        kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
                                           nr_pages,
                                           KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
                }

                /*
                 * Ensure that the _bss_decrypted section is marked as decrypted
                 * in the shared pages list.
                 */
                nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
                                        PAGE_SIZE);
                early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
                                                nr_pages, 0);

                /*
                 * If not booted using EFI, enable live migration support.
                 */
                if (!efi_enabled(EFI_BOOT))
                        wrmsrl(MSR_KVM_MIGRATION_CONTROL,
                               KVM_MIGRATION_READY);
        }

        kvmclock_init();
        x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
        /* RAX and CPL are already in the GHCB */
        ghcb_set_rbx(ghcb, regs->bx);
        ghcb_set_rcx(ghcb, regs->cx);
        ghcb_set_rdx(ghcb, regs->dx);
        ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
        /* No checking of the return state needed */
        return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                           = "KVM",
        .detect                         = kvm_detect,
        .type                           = X86_HYPER_KVM,
        .init.guest_late_init           = kvm_guest_init,
        .init.x2apic_available          = kvm_para_available,
        .init.msi_ext_dest_id           = kvm_msi_ext_dest_id,
        .init.init_platform             = kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
        .runtime.sev_es_hcall_prepare   = kvm_sev_es_hcall_prepare,
        .runtime.sev_es_hcall_finish    = kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>
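
/*
 * PV qspinlock wait/kick: a waiter halts in kvm_wait() until the lock
 * holder kicks it via KVM_HC_KICK_CPU (kvm_kick_cpu). The byte at @ptr is
 * re-checked with interrupts disabled immediately before halting, so a
 * kick that has already updated it is never missed.
 */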
static void kvm_wait(u8 *ptr, u8 val)
{
        if (in_nmi())
                return;

        /*
         * Halt until it's our turn and we are kicked. Note that we do a safe
         * halt in the IRQs-enabled case, to avoid hanging when the lock byte
         * is overwritten in the IRQ spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (irqs_disabled()) {
                if (READ_ONCE(*ptr) == val)
                        halt();
        } else {
                local_irq_disable();

                /* safe_halt() will enable IRQ */
                if (READ_ONCE(*ptr) == val)
                        safe_halt();
                else
                        local_irq_enable();
        }
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring 8
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        /*
         * In case the host doesn't support KVM_FEATURE_PV_UNHALT there is
         * still an advantage in keeping virt_spin_lock_key enabled:
         * virt_spin_lock() is preferred over native qspinlock when the vCPU
         * might be preempted.
         */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
                pr_info("PV spinlocks disabled, no host support\n");
                return;
        }

        /*
         * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
         * are available.
         */
        if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
                pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
                goto out;
        }

        if (num_possible_cpus() == 1) {
                pr_info("PV spinlocks disabled, single CPU\n");
                goto out;
        }

        if (nopvspin) {
                pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
                goto out;
        }

        pr_info("PV spinlocks enabled\n");

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = kvm_wait;
        pv_ops.lock.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_ops.lock.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
        /*
         * PV spinlocks are preferred over virt_spin_lock(), so once they are
         * enabled virt_spin_lock_key's value no longer matters. Disable it
         * either way.
         */
out:
        static_branch_disable(&virt_spin_lock_key);
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
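
/*
 * MSR_KVM_POLL_CONTROL (see Documentation/virt/kvm/msr.rst): bit 0 set
 * (the default) allows the host to poll on HLT on this vCPU's behalf;
 * writing 0 disables host-side polling when the guest does its own
 * haltpoll idle polling.
 */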
static void kvm_disable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
                pr_err_once("host does not support poll control\n");
                pr_err_once("host upgrade recommended\n");
                return;
        }

        /* Enabling guest halt polling disables host halt polling */
        smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
                return;

        /* Disabling guest halt polling re-enables host halt polling */
        smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif