// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>

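/*
 * Gates the async #PF fast paths. Enabled from kvm_guest_init() when the
 * host advertises KVM_FEATURE_ASYNC_PF and "no-kvmapf" was not given.
 */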
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}
early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

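/*
 * Tasks that faulted on a page the host still has to bring in are parked
 * on a small hash table, keyed by the async #PF token the host handed out.
 */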
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

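/* Look up the sleep node for @token; the caller must hold b->lock. */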
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }
        return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *e;

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* A dummy entry exists -> the wakeup was delivered ahead of the #PF */
                hlist_del(&e->link);
                raw_spin_unlock(&b->lock);
                kfree(e);
                return false;
        }

        n->token = token;
        n->cpu = smp_processor_id();
        init_swait_queue_head(&n->wq);
        hlist_add_head(&n->link, &b->list);
        raw_spin_unlock(&b->lock);
        return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
        struct kvm_task_sleep_node n;
        DECLARE_SWAITQUEUE(wait);

        lockdep_assert_irqs_disabled();

        if (!kvm_async_pf_queue_task(token, &n))
                return;

        for (;;) {
                prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                local_irq_enable();
                schedule();
                local_irq_disable();
        }
        finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (swq_has_sleeper(&n->wq))
                swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                struct kvm_task_sleep_node *n;
                struct hlist_node *p, *next;

                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        n = hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * The async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another cpu
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else {
                apf_task_wake_one(n);
        }
        raw_spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }
        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

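/*
 * Common async #PF entry point, called from the guest's page fault path.
 * Returns false if the fault is not an async #PF and has to be handled as
 * a regular page fault.
 */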
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
        u32 reason = kvm_read_and_reset_pf_reason();

        switch (reason) {
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
        case KVM_PV_REASON_PAGE_READY:
                break;
        default:
                return false;
        }

        /*
         * If the host managed to inject an async #PF into an interrupt
         * disabled region, then die hard as this is not going to end well
         * and the host side is seriously broken.
         */
        if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
                panic("Host injected async #PF in interrupt disabled region\n");

        if (reason == KVM_PV_REASON_PAGE_NOT_PRESENT) {
                if (unlikely(!(user_mode(regs))))
                        panic("Host injected async #PF in kernel mode\n");
                /* The page was swapped out by the host. */
                kvm_async_pf_task_wait_schedule(token);
        } else {
                kvm_async_pf_task_wake(token);
        }
        return true;
}
NOKPROBE_SYMBOL(__kvm_handle_async_pf);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for a lock or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

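/*
 * Per-CPU guest setup: hand the shared apf_reason area, the PV EOI word
 * and the steal time area to the hypervisor through their MSRs.
 */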
static void kvm_guest_cpu_init(void)
{
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa;

                WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

                pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
                pa |= KVM_ASYNC_PF_ENABLED;

                if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
                        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;

                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
         * memory. The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

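/*
 * Read the steal time published by the host for @cpu. The version field
 * acts like a seqcount: it is odd while the host is updating the record,
 * so retry until a consistent snapshot has been read.
 */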
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
        early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
        int cpu;

        if (!sev_active())
                return;

        for_each_possible_cpu(cpu) {
                __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
                __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
                __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
        }
}

static bool pv_tlb_flush_supported(void)
{
        return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

#ifdef CONFIG_SMP

static bool pv_ipi_supported(void)
{
        return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
        return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
                !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
                kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)

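/*
 * Send @vector to all CPUs in @mask using as few KVM_HC_SEND_IPI hypercalls
 * as possible. Destination APIC IDs are collected into a bitmap relative to
 * @min; whenever the next APIC ID does not fit into the cluster window, the
 * bitmap is flushed with a hypercall and a new window is started.
 */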
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
        unsigned long flags;
        int cpu, apic_id, icr;
        int min = 0, max = 0;
        __uint128_t ipi_bitmap = 0;
        long ret;

        if (cpumask_empty(mask))
                return;

        local_irq_save(flags);

        icr = APIC_DM_FIXED | vector;

        for_each_cpu(cpu, mask) {
                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
                if (!ipi_bitmap) {
                        min = max = apic_id;
                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
                        ipi_bitmap <<= min - apic_id;
                        min = apic_id;
                } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
                        max = apic_id < max ? max : apic_id;
                } else {
                        ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                                (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                        WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
                        min = max = apic_id;
                        ipi_bitmap = 0;
                }
                __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
        }

        if (ipi_bitmap) {
                ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
                        (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
                WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret);
        }

        local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
        __send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
        unsigned int this_cpu = smp_processor_id();
        struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
        const struct cpumask *local_mask;

        cpumask_copy(new_mask, mask);
        cpumask_clear_cpu(this_cpu, new_mask);
        local_mask = new_mask;
        __send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
        apic->send_IPI_mask = kvm_send_ipi_mask;
        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
        pr_info("KVM setup pv IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
        int cpu;

        native_send_call_func_ipi(mask);

        /* Make sure other vCPUs get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (vcpu_is_preempted(cpu)) {
                        kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
                        break;
                }
        }
}

static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
        /*
         * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
         * shares the guest physical address with the hypervisor.
         */
        sev_map_percpu_data();

        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif /* CONFIG_SMP */

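/*
 * Paravirtual remote TLB flush: preempted vCPUs are removed from the IPI
 * mask and tagged with KVM_VCPU_FLUSH_TLB instead, so the host flushes
 * their TLB the next time it runs them.
 */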
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

        cpumask_copy(flushmask, cpumask);
        /*
         * We have to call the flush only on online vCPUs, and
         * queue flush_on_enter for preempted vCPUs.
         */
        for_each_cpu(cpu, flushmask) {
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if ((state & KVM_VCPU_PREEMPTED)) {
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_others(flushmask, info);
}

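/*
 * Guest-wide initialisation, run as the hypervisor_x86 guest_late_init
 * hook: wire up a paravirt callback for every feature the host advertises.
 */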
static void __init kvm_guest_init(void)
{
        int i;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_ops.time.steal_clock = kvm_steal_clock;
        }

        if (pv_tlb_flush_supported()) {
                pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
                pv_ops.mmu.tlb_remove_table = tlb_remove_table;
                pr_info("KVM setup pv remote TLB flush\n");
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf)
                static_branch_enable(&kvm_async_pf_enabled);

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (pv_sched_yield_supported()) {
                smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
                pr_info("KVM setup pv sched yield\n");
        }
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        sev_map_percpu_data();
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
        return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
        if (pv_ipi_supported())
                kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
        kvmclock_init();
        x86_platform.apic_post_init = kvm_apic_init;
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .type                   = X86_HYPER_KVM,
        .init.guest_late_init   = kvm_guest_init,
        .init.x2apic_available  = kvm_para_available,
        .init.init_platform     = kvm_init_platform,
};

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }
        return 0;
}
arch_initcall(activate_jump_labels);

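/*
 * Allocate the per-CPU scratch cpumask used by the PV TLB flush and PV IPI
 * paths, but only if at least one of those features is actually usable.
 */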
static __init int kvm_alloc_cpumask(void)
{
        int cpu;
        bool alloc = false;

        if (!kvm_para_available() || nopv)
                return 0;

        if (pv_tlb_flush_supported())
                alloc = true;
#if defined(CONFIG_SMP)
        if (pv_ipi_supported())
                alloc = true;
#endif
        if (alloc)
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
                                                GFP_KERNEL, cpu_to_node(cpu));
                }

        return 0;
}
arch_initcall(kvm_alloc_cpumask);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

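/*
 * pv qspinlock wait hook: instead of spinning, halt the vCPU until it is
 * kicked by the lock holder (or by any interrupt, when interrupts are on).
 */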
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it's our turn and we are kicked. Note that we do a safe
         * halt for the irq enabled case to avoid hanging when the lock info
         * is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

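/*
 * vcpu_is_preempted() backend: report whether the host currently has the
 * target vCPU scheduled out, based on the preempted flag in steal_time.
 * 32-bit uses the C helper below; x86-64 uses the hand-written thunk
 * further down to avoid the callee-save register overhead.
 */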
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring eight
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq __per_cpu_offset(,%rdi,8), %rax;"
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        /* Does the host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                return;

        /* Don't use the pvqspinlock code if there is only 1 vCPU. */
        if (num_possible_cpus() == 1)
                return;

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = kvm_wait;
        pv_ops.lock.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_ops.lock.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
        wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

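/*
 * cpuidle-haltpoll glue: when the guest starts polling before halt, tell
 * the host (via MSR_KVM_POLL_CONTROL) to stop polling on its side, and
 * re-enable host polling when guest polling is turned off.
 */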
void arch_haltpoll_enable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
                pr_err_once("kvm: host does not support poll control\n");
                pr_err_once("kvm: host upgrade recommended\n");
                return;
        }

        /* Enabling guest halt poll disables host halt poll */
        smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
        if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
                return;

        /* Disabling guest halt poll re-enables host halt poll */
        smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif /* CONFIG_ARCH_CPUIDLE_HALTPOLL */