/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#ifdef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define APIC_BUS_CYCLE_NS	1

/* #define apic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg) */
#define apic_debug(fmt, arg...) do {} while (0)

/* 14 is the APIC version for Xeon and Pentium 4 (SDM section 8.4.8). */
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define APIC_SHORT_MASK			0xc0000
#define APIC_DEST_NOSHORT		0x0
#define APIC_DEST_MASK			0x800
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

#define APIC_BROADCAST			0xFF
#define X2APIC_BROADCAST		0xFFFFFFFFul
static bool lapic_timer_advance_adjust_done = false;
#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
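/*
 * IRR/ISR/TMR are 256-bit registers exposed as eight 32-bit banks spaced
 * 0x10 bytes apart in the register page; the VEC_POS()/REG_POS() helpers
 * used below map a vector number to its bit and bank offset.
 */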
static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline void apic_clear_vector(int vec, void *bitmap)
{
        clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
/* These static keys count how many vCPUs have a hw/sw-disabled APIC. */
struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK	\
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
{
        return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
}

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
        switch (map->mode) {
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
                }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        default:
                /* Not optimized. */
                return false;
        }
}
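/*
 * kvm->arch.apic_map caches which LAPIC(s) each physical or logical
 * destination ID resolves to, so interrupt fan-out avoids scanning every
 * vCPU; it is rebuilt under apic_map_lock and published via RCU below.
 */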
static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

static void recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */

        mutex_lock(&kvm->arch.apic_map_lock);

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                           GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
                u32 ldr;
                u8 xapic_id;
                u32 x2apic_id;

                if (!kvm_apic_present(vcpu))
                        continue;

                xapic_id = kvm_xapic_id(apic);
                x2apic_id = kvm_x2apic_id(apic);

                /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
                if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
                                x2apic_id <= new->max_apic_id)
                        new->phys_map[x2apic_id] = apic;
                /*
                 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
                 * prevent them from masking VCPUs with APIC ID <= 0xff.
                 */
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;

                ldr = kvm_lapic_get_reg(apic, APIC_LDR);

                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
                } else if (ldr) {
                        ldr = GET_APIC_LOGICAL_ID(ldr);
                        if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                                new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
                        else
                                new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                }

                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                        continue;

                if (mask)
                        cluster[ffs(mask) - 1] = apic;
        }
out:
        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_key_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_key_slow_inc(&apic_sw_disabled.key);
        }
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        recalculate_apic_map(apic->vcpu->kvm);
}

/* The x2APIC LDR is derived from the ID: cluster in bits 31:16, bit (id & 0xf) set. */
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        recalculate_apic_map(apic->vcpu->kvm);
}
static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
        return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *feat;
        u32 v = APIC_VERSION;

        if (!lapic_in_kernel(vcpu))
                return;

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register.  Some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
         * in the LAPIC without checking the IOAPIC version first, so
         * level-triggered interrupts would never get EOIed in the IOAPIC.
         */
        feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
        if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
        LVT_MASK,       /* partial LVTT mask; timer mode mask added at runtime */
        LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
        LINT_MASK, LINT_MASK,	/* LVT0-1 */
        LVT_MASK		/* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}
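/*
 * Drain the posted-interrupt request bitmap (PIR) into the virtual IRR.
 * Returns true if the highest newly-set IRR bit is also the highest IRR
 * bit overall, i.e. the caller can use it directly as the new RVI.
 */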
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
                irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
                        prev_irr_val = irr_val;
                        irr_val |= xchg(&pir[i], 0);
                        *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                        if (prev_irr_val != irr_val) {
                                max_updated_irr =
                                        __fls(irr_val ^ prev_irr_val) + vec;
                        }
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}
static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint. It will be always
         * true with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        vcpu = apic->vcpu;

        if (unlikely(vcpu->arch.apicv_active)) {
                /* need to update RVI */
                apic_clear_vector(vec, apic->regs + APIC_IRR);
                kvm_x86_ops->hwapic_irr_update(vcpu,
                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                apic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu, vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * The ISR (in-service register) bit is set when injecting
                 * an interrupt, and the highest pending vector is the one
                 * injected, so the most recently set bit matches the
                 * highest bit in the ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment. In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu,
                                               apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and
         * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
         * will cause vmexit immediately and the value will be recalculated
         * on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                        irq->level, irq->trig_mode, dest_map);
}
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        int i;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count = 0;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        if (icr & APIC_DEST_MASK)
                return -KVM_EINVAL;
        if (icr & APIC_SHORT_MASK)
                return -KVM_EINVAL;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (unlikely(!map)) {
                count = -EOPNOTSUPP;
                goto out;
        }

        if (min > map->max_apic_id)
                goto out;
        /* Bits above cluster_size are masked in the caller.  */
        for_each_set_bit(i, &ipi_bitmap_low,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, &irq, NULL);
                }
        }

        min += cluster_size;

        if (min > map->max_apic_id)
                goto out;

        for_each_set_bit(i, &ipi_bitmap_high,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, &irq, NULL);
                }
        }

out:
        rcu_read_unlock();
        return count;
}
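/*
 * The PV EOI flag lives in guest memory, registered through the
 * MSR_KVM_PV_EOI_EN MSR, so the helpers below access it via the cached
 * guest-memory read/write routines.
 */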
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0)
                apic_debug("Can't read EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
        return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
                apic_debug("Can't set EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
                apic_debug("Can't clear EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (apic->vcpu->arch.apicv_active)
                highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}
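/*
 * PPR is computed from TPR and the priority class of the highest
 * in-service vector: the full TPR wins when its class is >= the ISR
 * class, otherwise only the ISR class (low nibble zeroed) is used.
 */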
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
                   apic, ppr, isr, isrv);

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        if (apic_x2apic_mode(apic))
                return mda == kvm_x2apic_id(apic);

        /*
         * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
         * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
         * this allows unique addressing of VCPUs with APIC ID over 0xff.
         * The 0xff check is needed because the xAPIC ID is writeable.
         */
        if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
                return true;

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                apic_debug("Bad DFR vcpu %d: %08x\n",
                           apic->vcpu->vcpu_id, kvm_lapic_get_reg(apic, APIC_DFR));
                return false;
        }
}
/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                         int short_hand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        apic_debug("target %p, source %p, dest 0x%x, "
                   "dest_mode 0x%x, short_hand 0x%x\n",
                   target, source, dest, dest_mode, short_hand);

        ASSERT(target);
        switch (short_hand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                apic_debug("kvm: apic: Bad dest shorthand value %x\n",
                           short_hand);
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                       const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                printk(KERN_INFO
                       "Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                                map->mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}
/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}
/*
 * This routine tries to handle interrupts in posted mode; here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                /* fall through */
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
                        else
                                apic_clear_vector(vector, apic->regs + APIC_TMR);
                }

                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
                else {
                        kvm_lapic_set_irr(vector, apic);

                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        /* make sure pending_events is visible before sending
                         * the request */
                        smp_wmb();
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                } else {
                        apic_debug("Ignoring de-assert INIT to vcpu %d\n",
                                   vcpu->vcpu_id);
                }
                break;

        case APIC_DM_STARTUP:
                apic_debug("SIPI to vcpu %d vector 0x%02x\n",
                           vcpu->vcpu_id, vector);
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Eoi the ioapic only if the ioapic doesn't own the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every EOI write has a corresponding ISR bit; one example is
         * when the kernel checks the timer in setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
static void apic_send_ipi(struct kvm_lapic *apic)
{
        u32 icr_low = kvm_lapic_get_reg(apic, APIC_ICR);
        u32 icr_high = kvm_lapic_get_reg(apic, APIC_ICR2);
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        apic_debug("icr_high 0x%x, icr_low 0x%x, "
                   "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
                   "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x, "
                   "msi_redir_hint 0x%x\n",
                   icr_high, icr_low, irq.shorthand, irq.dest_id,
                   irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
                   irq.vector, irq.msi_redir_hint);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
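/*
 * Derive the timer's current count from the time left until the next
 * expiration: TMCCT = (ns remaining mod period) / (bus cycle ns * divider),
 * so a periodic timer reads back a value that wraps every period.
 */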
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
                apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                         (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                apic_debug("Access APIC ARBPRI register which is for P6\n");
                break;

        case APIC_TMCCT:	/* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                /* fall thru */
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        static const u64 rmask = 0x43ff01ffffffe70cULL;

        if ((alignment + len) > 4) {
                apic_debug("KVM_APIC_READ: alignment error %x %d\n",
                           offset, len);
                return 1;
        }

        if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
                apic_debug("KVM_APIC_READ: read reserved register %x\n",
                           offset);
                return 1;
        }

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return addr >= apic->base_address &&
                addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                           gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                memset(data, 0xff, len);
                return 0;
        }

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);

        apic_debug("timer divide count is 0x%x\n",
                   apic->divide_count);
}

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
        /*
         * Do not allow the guest to program periodic timers with a small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_ratelimited(
                            "kvm: vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }
}
static void apic_update_lvtt(struct kvm_lapic *apic)
{
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
                        apic->lapic_timer.timer_mode_mask;

        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
                }
                apic->lapic_timer.timer_mode = timer_mode;
                limit_periodic_timer_frequency(apic);
        }
}

static void apic_timer_expired(struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct swait_queue_head *q = &vcpu->wq;
        struct kvm_timer *ktimer = &apic->lapic_timer;

        if (atomic_read(&apic->lapic_timer.pending))
                return;

        atomic_inc(&apic->lapic_timer.pending);
        kvm_set_pending_timer(vcpu);

        /*
         * For x86, the atomic_inc() is serialized, thus
         * using swait_active() is safe.
         */
        if (swait_active(q))
                swake_up_one(q);

        if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
                ktimer->expired_tscdeadline = ktimer->tscdeadline;
}
/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

        if (kvm_apic_hw_enabled(apic)) {
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;

                if (vcpu->arch.apicv_active)
                        bitmap = apic->regs + APIC_IRR;

                if (apic_test_vector(vec, bitmap))
                        return true;
        }
        return false;
}
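/*
 * The timer is deliberately fired lapic_timer_advance_ns early; if the
 * guest TSC has not yet reached the programmed deadline at entry time,
 * busy-wait for the remainder so the injected interrupt lands on time.
 */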
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline, ns;

        if (!lapic_in_kernel(vcpu))
                return;

        if (apic->lapic_timer.expired_tscdeadline == 0)
                return;

        if (!lapic_timer_int_injected(vcpu))
                return;

        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);

        /* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
        if (guest_tsc < tsc_deadline)
                __delay(min(tsc_deadline - guest_tsc,
                        nsec_to_cycles(vcpu, lapic_timer_advance_ns)));

        if (!lapic_timer_advance_adjust_done) {
                /* too early */
                if (guest_tsc < tsc_deadline) {
                        ns = (tsc_deadline - guest_tsc) * 1000000ULL;
                        do_div(ns, vcpu->arch.virtual_tsc_khz);
                        lapic_timer_advance_ns -= min((unsigned int)ns,
                                lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
                } else {
                /* too late */
                        ns = (guest_tsc - tsc_deadline) * 1000000ULL;
                        do_div(ns, vcpu->arch.virtual_tsc_khz);
                        lapic_timer_advance_ns += min((unsigned int)ns,
                                lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
                }
                if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
                        lapic_timer_advance_adjust_done = true;
        }
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
        u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
        u64 ns = 0;
        ktime_t expire;
        struct kvm_vcpu *vcpu = apic->vcpu;
        unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
        unsigned long flags;
        ktime_t now;

        if (unlikely(!tscdeadline || !this_tsc_khz))
                return;

        local_irq_save(flags);

        now = ktime_get();
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        if (likely(tscdeadline > guest_tsc)) {
                ns = (tscdeadline - guest_tsc) * 1000000ULL;
                do_div(ns, this_tsc_khz);
                expire = ktime_add_ns(now, ns);
                expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
                hrtimer_start(&apic->lapic_timer.timer,
                                expire, HRTIMER_MODE_ABS_PINNED);
        } else
                apic_timer_expired(apic);

        local_irq_restore(flags);
}
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
        ktime_t now, remaining;
        u64 ns_remaining_old, ns_remaining_new;

        apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
                * APIC_BUS_CYCLE_NS * apic->divide_count;
        limit_periodic_timer_frequency(apic);

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns_remaining_old = ktime_to_ns(remaining);
        ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
                                           apic->divide_count, old_divisor);

        apic->lapic_timer.tscdeadline +=
                nsec_to_cycles(apic->vcpu, ns_remaining_new) -
                nsec_to_cycles(apic->vcpu, ns_remaining_old);
        apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}

static bool set_target_expiration(struct kvm_lapic *apic)
{
        ktime_t now;
        u64 tscl = rdtsc();

        now = ktime_get();
        apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
                * APIC_BUS_CYCLE_NS * apic->divide_count;

        if (!apic->lapic_timer.period) {
                apic->lapic_timer.tscdeadline = 0;
                return false;
        }

        limit_periodic_timer_frequency(apic);

        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                   PRIx64 ", "
                   "timer initial count 0x%x, period %lldns, "
                   "expire @ 0x%016" PRIx64 ".\n", __func__,
                   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                   kvm_lapic_get_reg(apic, APIC_TMICT),
                   apic->lapic_timer.period,
                   ktime_to_ns(ktime_add_ns(now,
                                apic->lapic_timer.period)));

        apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
                nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
        apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);

        return true;
}
static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
        ktime_t now = ktime_get();
        u64 tscl = rdtsc();
        ktime_t delta;

        /*
         * Synchronize both deadlines to the same time source or
         * differences in the periods (caused by differences in the
         * underlying clocks or numerical approximation errors) will
         * cause the two to drift apart over time as the errors
         * accumulate.
         */
        apic->lapic_timer.target_expiration =
                ktime_add_ns(apic->lapic_timer.target_expiration,
                                apic->lapic_timer.period);
        delta = ktime_sub(apic->lapic_timer.target_expiration, now);
        apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
                nsec_to_cycles(apic->vcpu, delta);
}

static void start_sw_period(struct kvm_lapic *apic)
{
        if (!apic->lapic_timer.period)
                return;

        if (ktime_after(ktime_get(),
                        apic->lapic_timer.target_expiration)) {
                apic_timer_expired(apic);

                if (apic_lvtt_oneshot(apic))
                        return;

                advance_periodic_target_expiration(apic);
        }

        hrtimer_start(&apic->lapic_timer.timer,
                apic->lapic_timer.target_expiration,
                HRTIMER_MODE_ABS_PINNED);
}
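/*
 * Below is the hardware-assisted ("hv") timer support, e.g. the VMX
 * preemption timer; when it is unavailable or cannot be programmed,
 * KVM falls back to the hrtimer-based software timer above.
 */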
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
        if (!lapic_in_kernel(vcpu))
                return false;

        return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);

static void cancel_hv_timer(struct kvm_lapic *apic)
{
        WARN_ON(preemptible());
        WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        kvm_x86_ops->cancel_hv_timer(apic->vcpu);
        apic->lapic_timer.hv_timer_in_use = false;
}
static bool start_hv_timer(struct kvm_lapic *apic)
{
        struct kvm_timer *ktimer = &apic->lapic_timer;
        struct kvm_vcpu *vcpu = apic->vcpu;
        bool expired;

        WARN_ON(preemptible());
        if (!kvm_x86_ops->set_hv_timer)
                return false;

        if (!ktimer->tscdeadline)
                return false;

        if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
                return false;

        ktimer->hv_timer_in_use = true;
        hrtimer_cancel(&ktimer->timer);

        /*
         * To simplify handling the periodic timer, leave the hv timer running
         * even if the deadline timer has expired, i.e. rely on the resulting
         * VM-Exit to recompute the periodic timer's target expiration.
         */
        if (!apic_lvtt_period(apic)) {
                /*
                 * Cancel the hv timer if the sw timer fired while the hv timer
                 * was being programmed, or if the hv timer itself expired.
                 */
                if (atomic_read(&ktimer->pending)) {
                        cancel_hv_timer(apic);
                } else if (expired) {
                        apic_timer_expired(apic);
                        cancel_hv_timer(apic);
                }
        }

        trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);

        return true;
}
static void start_sw_timer(struct kvm_lapic *apic)
{
        struct kvm_timer *ktimer = &apic->lapic_timer;

        WARN_ON(preemptible());
        if (apic->lapic_timer.hv_timer_in_use)
                cancel_hv_timer(apic);
        if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
                return;

        if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
                start_sw_period(apic);
        else if (apic_lvtt_tscdeadline(apic))
                start_sw_tscdeadline(apic);
        trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
        preempt_disable();

        if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
                goto out;

        if (!start_hv_timer(apic))
                start_sw_timer(apic);
out:
        preempt_enable();
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        preempt_disable();
        /* If the preempt notifier has already run, it also called apic_timer_expired */
        if (!apic->lapic_timer.hv_timer_in_use)
                goto out;
        WARN_ON(swait_active(&vcpu->wq));
        cancel_hv_timer(apic);
        apic_timer_expired(apic);

        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                advance_periodic_target_expiration(apic);
                restart_apic_timer(apic);
        }
out:
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
        restart_apic_timer(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        preempt_disable();
        /* Possibly the TSC deadline timer is not enabled yet */
        if (apic->lapic_timer.hv_timer_in_use)
                start_sw_timer(apic);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        WARN_ON(!apic->lapic_timer.hv_timer_in_use);
        restart_apic_timer(apic);
}
static void start_apic_timer(struct kvm_lapic *apic)
{
        atomic_set(&apic->lapic_timer.pending, 0);

        if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
            && !set_target_expiration(apic))
                return;

        restart_apic_timer(apic);
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
        bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

        if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
                apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
                if (lvt0_in_nmi_mode) {
                        apic_debug("Receive NMI setting on APIC_LVT0 "
                                   "for cpu %d\n", apic->vcpu->vcpu_id);
                        atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
                } else
                        atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
        }
}
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
        int ret = 0;

        trace_kvm_apic_write(reg, val);

        switch (reg) {
        case APIC_ID:		/* Local APIC ID */
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_xapic_id(apic, val >> 24);
                else
                        ret = 1;
                break;

        case APIC_TASKPRI:
                report_tpr_access(apic, true);
                apic_set_tpr(apic, val & 0xff);
                break;

        case APIC_EOI:
                apic_set_eoi(apic);
                break;

        case APIC_LDR:
                if (!apic_x2apic_mode(apic))
                        kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
                else
                        ret = 1;
                break;

        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
                        recalculate_apic_map(apic->vcpu->kvm);
                } else
                        ret = 1;
                break;

        case APIC_SPIV: {
                u32 mask = 0x3ff;
                if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
                        mask |= APIC_SPIV_DIRECTED_EOI;
                apic_set_spiv(apic, val & mask);
                if (!(val & APIC_SPIV_APIC_ENABLED)) {
                        int i;
                        u32 lvt_val;

                        for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
                                lvt_val = kvm_lapic_get_reg(apic,
                                                       APIC_LVTT + 0x10 * i);
                                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
                        apic_update_lvtt(apic);
                        atomic_set(&apic->lapic_timer.pending, 0);

                }
                break;
        }
        case APIC_ICR:
                /* No delay here, so we always clear the pending bit */
                kvm_lapic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
                apic_send_ipi(apic);
                break;

        case APIC_ICR2:
                if (!apic_x2apic_mode(apic))
                        val &= 0xff000000;
                kvm_lapic_set_reg(apic, APIC_ICR2, val);
                break;

        case APIC_LVT0:
                apic_manage_nmi_watchdog(apic, val);
                /* fall through */
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT1:
        case APIC_LVTERR:
                /* TODO: Check vector */
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;

                val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
                kvm_lapic_set_reg(apic, reg, val);

                break;

        case APIC_LVTT:
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;
                val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
                kvm_lapic_set_reg(apic, APIC_LVTT, val);
                apic_update_lvtt(apic);
                break;

        case APIC_TMICT:
                if (apic_lvtt_tscdeadline(apic))
                        break;

                hrtimer_cancel(&apic->lapic_timer.timer);
                kvm_lapic_set_reg(apic, APIC_TMICT, val);
                start_apic_timer(apic);
                break;

        case APIC_TDCR: {
                uint32_t old_divisor = apic->divide_count;

                if (val & 4)
                        apic_debug("KVM_WRITE:TDCR %x\n", val);
                kvm_lapic_set_reg(apic, APIC_TDCR, val);
                update_divide_count(apic);
                if (apic->divide_count != old_divisor &&
                                apic->lapic_timer.period) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        update_target_expiration(apic, old_divisor);
                        restart_apic_timer(apic);
                }
                break;
        }
        case APIC_ESR:
                if (apic_x2apic_mode(apic) && val != 0) {
                        apic_debug("KVM_WRITE:ESR not zero %x\n", val);
                        ret = 1;
                }
                break;

        case APIC_SELF_IPI:
                if (apic_x2apic_mode(apic)) {
                        kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
                } else
                        ret = 1;
                break;
        default:
                ret = 1;
                break;
        }
        if (ret)
                apic_debug("Local APIC Write to read-only register %x\n", reg);
        return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                            gpa_t address, int len, const void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        unsigned int offset = address - apic->base_address;
        u32 val;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                return 0;
        }

        /*
         * APIC registers must be aligned on a 128-bit boundary.
         * 32/64/128-bit registers must be accessed thru 32 bits.
         * Refer SDM 8.4.1
         */
        if (len != 4 || (offset & 0xf)) {
                /* Don't shout loud, $infamous_os would cause only noise. */
                apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
                return 0;
        }

        val = *(u32*)data;

        /* too common printing */
        if (offset != APIC_EOI)
                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
                           "0x%x\n", __func__, offset, len, val);

        kvm_lapic_reg_write(apic, offset & 0xff0, val);

        return 0;
}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
        kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
        u32 val = 0;

        /* hw has done the conditional check and inst decode */
        offset &= 0xff0;

        kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

        /* TODO: optimize to just emulate side effect w/o one more write */
        kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!vcpu->arch.apic)
                return;

        hrtimer_cancel(&apic->lapic_timer.timer);

        if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
                static_key_slow_dec_deferred(&apic_hw_disabled);

        if (!apic->sw_enabled)
                static_key_slow_dec_deferred(&apic_sw_disabled);

        if (apic->regs)
                free_page((unsigned long)apic->regs);

        kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu) ||
                !apic_lvtt_tscdeadline(apic))
                return 0;

        return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
                        apic_lvtt_period(apic))
                return;

        hrtimer_cancel(&apic->lapic_timer.timer);
        apic->lapic_timer.tscdeadline = data;
        start_apic_timer(apic);
}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
                     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
        u64 tpr;

        tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

        return (tpr & 0xf0) >> 4;
}
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
        u64 old_value = vcpu->arch.apic_base;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (!apic)
                value |= MSR_IA32_APICBASE_BSP;

        vcpu->arch.apic_base = value;

        if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
                kvm_update_cpuid(vcpu);

        if (!apic)
                return;

        /* update jump label if enable bit changes */
        if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
                if (value & MSR_IA32_APICBASE_ENABLE) {
                        kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
                        static_key_slow_dec_deferred(&apic_hw_disabled);
                } else {
                        static_key_slow_inc(&apic_hw_disabled.key);
                        recalculate_apic_map(vcpu->kvm);
                }
        }

        if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
                kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

        if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
                kvm_x86_ops->set_virtual_apic_mode(vcpu);

        apic->base_address = apic->vcpu->arch.apic_base &
                             MSR_IA32_APICBASE_BASE;

        if ((value & MSR_IA32_APICBASE_ENABLE) &&
             apic->base_address != APIC_DEFAULT_PHYS_BASE)
                pr_warn_once("APIC base relocation is unsupported by KVM");

        /* with FSB delivery interrupt, we can restart APIC functionality */
        apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
                   "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
}
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        int i;

        if (!apic)
                return;

        apic_debug("%s\n", __func__);

        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);

        if (!init_event) {
                kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
                                         MSR_IA32_APICBASE_ENABLE);
                kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
        }
        kvm_apic_set_version(apic->vcpu);

        for (i = 0; i < KVM_APIC_LVT_NUM; i++)
                kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
        if (kvm_vcpu_is_reset_bsp(vcpu) &&
            kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                kvm_lapic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

        kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
        apic_set_spiv(apic, 0xff);
        kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
        if (!apic_x2apic_mode(apic))
                kvm_apic_set_ldr(apic, 0);
        kvm_lapic_set_reg(apic, APIC_ESR, 0);
        kvm_lapic_set_reg(apic, APIC_ICR, 0);
        kvm_lapic_set_reg(apic, APIC_ICR2, 0);
        kvm_lapic_set_reg(apic, APIC_TDCR, 0);
        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
        for (i = 0; i < 8; i++) {
                kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
                kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
                kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
        }
        apic->irr_pending = vcpu->arch.apicv_active;
        apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
        apic->highest_isr_cache = -1;
        update_divide_count(apic);
        atomic_set(&apic->lapic_timer.pending, 0);
        if (kvm_vcpu_is_bsp(vcpu))
                kvm_lapic_set_base(vcpu,
                                vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
        vcpu->arch.pv_eoi.msr_val = 0;
        apic_update_ppr(apic);
        if (vcpu->arch.apicv_active) {
                kvm_x86_ops->apicv_post_state_restore(vcpu);
                kvm_x86_ops->hwapic_irr_update(vcpu, -1);
                kvm_x86_ops->hwapic_isr_update(vcpu, -1);
        }

        vcpu->arch.apic_arb_prio = 0;
        vcpu->arch.apic_attention = 0;

        apic_debug("%s: vcpu=%p, id=0x%x, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
                   vcpu, kvm_lapic_get_reg(apic, APIC_ID),
                   vcpu->arch.apic_base, apic->base_address);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
        return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
                return atomic_read(&apic->lapic_timer.pending);

        return 0;
}
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
        u32 reg = kvm_lapic_get_reg(apic, lvt_type);
        int vector, mode, trig_mode;

        if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
                vector = reg & APIC_VECTOR_MASK;
                mode = reg & APIC_MODE_MASK;
                trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
                return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
                                        NULL);
        }
        return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (apic)
                kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
        .read     = apic_mmio_read,
        .write    = apic_mmio_write,
};
static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
        struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
        struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

        apic_timer_expired(apic);

        if (lapic_is_periodic(apic)) {
                advance_periodic_target_expiration(apic);
                hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
                return HRTIMER_RESTART;
        } else
                return HRTIMER_NORESTART;
}
int kvm_create_lapic(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic;

        ASSERT(vcpu != NULL);
        apic_debug("apic_init %d\n", vcpu->vcpu_id);

        apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
        if (!apic)
                goto nomem;

        vcpu->arch.apic = apic;

        apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
        if (!apic->regs) {
                printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
                       vcpu->vcpu_id);
                goto nomem_free_apic;
        }
        apic->vcpu = vcpu;

        hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED);
        apic->lapic_timer.timer.function = apic_timer_fn;

        /*
         * APIC is created enabled. This will prevent kvm_lapic_set_base from
         * thinking that APIC state has changed.
         */
        vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
        static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
        kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

        return 0;
nomem_free_apic:
        kfree(apic);
nomem:
        return -ENOMEM;
}
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 ppr;

        if (!apic_enabled(apic))
                return -1;

        __apic_update_ppr(apic, &ppr);
        return apic_has_interrupt_for_ppr(apic, ppr);
}

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
        u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
        int r = 0;

        if (!kvm_apic_hw_enabled(vcpu->arch.apic))
                r = 1;
        if ((lvt0 & APIC_LVT_MASKED) == 0 &&
            GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
                r = 1;
        return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (atomic_read(&apic->lapic_timer.pending) > 0) {
                kvm_apic_local_deliver(apic, APIC_LVTT);
                if (apic_lvtt_tscdeadline(apic))
                        apic->lapic_timer.tscdeadline = 0;
                if (apic_lvtt_oneshot(apic)) {
                        apic->lapic_timer.tscdeadline = 0;
                        apic->lapic_timer.target_expiration = 0;
                }
                atomic_set(&apic->lapic_timer.pending, 0);
        }
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode.  Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */
	apic_clear_irr(vector, apic);
	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

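/*
 * KVM_{GET,SET}_LAPIC expose the APIC ID in its xAPIC position (bits
 * 31-24) unless userspace enabled the x2APIC format, so in x2APIC mode
 * the ID has to be shifted on save/restore.  On restore the LDR is
 * recomputed from the ID (architecturally ((id >> 4) << 16) |
 * (1 << (id & 0xf))), since the LDR is read-only in x2APIC mode.
 */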
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r)
		return r;
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	start_apic_timer(apic);
	apic->irr_pending = true;
	apic->isr_count = vcpu->arch.apicv_active ?
				1 : count_vectors(apic->regs + APIC_ISR);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		kvm_x86_ops->apicv_post_state_restore(vcpu);
		kvm_x86_ops->hwapic_irr_update(vcpu,
				apic_find_highest_irr(apic));
		kvm_x86_ops->hwapic_isr_update(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

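/*
 * The LAPIC timer hrtimer is pinned (HRTIMER_MODE_ABS_PINNED) to the
 * physical CPU that armed it, so when a vCPU migrates the timer must
 * be cancelled and restarted on the destination CPU.
 */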
void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

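/*
 * Publish a snapshot of the TPR, the high nibble of the in-service
 * vector and the highest pending vector as a single 32-bit word in the
 * guest's vAPIC page, so the guest can consult them without an exit.
 */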
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

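/*
 * x2APIC registers live in the MSR range starting at APIC_BASE_MSR
 * (0x800); MSR 0x800 + n corresponds to MMIO offset n * 16, hence the
 * "<< 4" below.  ICR is the one 64-bit register in x2APIC mode, so its
 * high half is staged through APIC_ICR2 before the low half is written.
 */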
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2) {
		apic_debug("KVM_APIC_READ: read x2apic reserved register %x\n",
			   reg);
		return 1;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

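/*
 * The Hyper-V emulation routes its synthetic APIC-access MSRs
 * (HV_X64_MSR_EOI/ICR/TPR) through the two helpers below; they take a
 * register offset directly and only require an in-kernel LAPIC, not
 * x2APIC mode.
 */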
int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

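/*
 * MSR_KVM_PV_EOI_EN: bit 0 (KVM_MSR_ENABLED) turns the feature on; the
 * remaining bits hold the guest-physical address of a 4-byte-aligned
 * flag word shared between guest and host.
 */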
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}

void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu) || !apic->pending_events)
		return;

	/*
	 * INITs are latched while in SMM.  Because an SMM CPU cannot
	 * be in KVM_MP_STATE_INIT_RECEIVED state, just eat SIPIs
	 * and delay processing of INIT until the next RSM.
	 */
	if (is_smm(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return;
	}

	pe = xchg(&apic->pending_events, 0);
	if (test_bit(KVM_APIC_INIT, &pe)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe) &&
	    vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
		/* evaluate pending_events before reading the vector */
		smp_rmb();
		sipi_vector = apic->sipi_vector;
		apic_debug("vcpu %d received sipi with vector # %x\n",
			   vcpu->vcpu_id, sipi_vector);
		kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}
}

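/*
 * apic_hw_disabled/apic_sw_disabled are deferred static keys: rate-
 * limiting the jump-label patching to once per second keeps a guest
 * that rapidly toggles its APIC from hammering kernel text patching,
 * and kvm_lapic_exit() must flush any deferred update before the
 * module goes away.
 */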
void kvm_lapic_init(void)
{
	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&apic_hw_disabled, HZ);
	jump_label_rate_limit(&apic_sw_disabled, HZ);
}

void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}