arch/x86/kvm/lapic.c
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
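
/*
 * Illustrative note (not from the original source): on 32-bit builds the
 * C '%' operator on u64 operands would emit a call to the libgcc helper
 * __umoddi3, which the kernel does not provide, so the remainder is
 * rebuilt from div64_u64().  Worked example:
 * mod_64(10, 3) = 10 - 3 * div64_u64(10, 3) = 10 - 3 * 3 = 1 == 10 % 3.
 */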

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION                    (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH               (1 << 12)
/* the following defines are not in apicdef.h */
#define MAX_APIC_VECTOR                 256
#define APIC_VECTORS_PER_REG            32

#define APIC_BROADCAST                  0xFF
#define X2APIC_BROADCAST                0xFFFFFFFFul

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN  100     /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX  10000   /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT     1000
#define LAPIC_TIMER_ADVANCE_NS_MAX      5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8

static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
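
/*
 * Illustrative note (assumes the VEC_POS()/REG_POS() helpers from lapic.h,
 * i.e. VEC_POS(v) = v & 31 and REG_POS(v) = (v >> 5) << 4): the 256 vectors
 * live in eight 32-bit registers spaced 0x10 bytes apart.  For vector 0x31
 * (49), REG_POS = 0x10 (the second register) and VEC_POS = 17 (bit 17
 * within it).
 */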

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

struct static_key_deferred apic_hw_disabled __read_mostly;
struct static_key_deferred apic_sw_disabled __read_mostly;

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK        \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK       \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_can_post_timer_interrupt);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
        switch (map->mode) {
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
                }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        default:
                /* Not optimized. */
                return false;
        }
}
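
/*
 * Worked example (illustrative, not from the original source): for an
 * x2APIC logical dest_id of 0x00020008, cluster number 2 yields
 * offset = 2 * 16 = 32, so *cluster points at phys_map[32] and
 * *mask = 0x0008, i.e. bit 3, selecting the vCPU with x2APIC ID 0x23.
 */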

static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

static void recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */

        mutex_lock(&kvm->arch.apic_map_lock);

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                           GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
                u32 ldr;
                u8 xapic_id;
                u32 x2apic_id;

                if (!kvm_apic_present(vcpu))
                        continue;

                xapic_id = kvm_xapic_id(apic);
                x2apic_id = kvm_x2apic_id(apic);

                /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
                if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
                                x2apic_id <= new->max_apic_id)
                        new->phys_map[x2apic_id] = apic;
                /*
                 * ... the xAPIC ID of VCPUs with APIC ID > 0xff will wrap
                 * around; prevent them from masking VCPUs with APIC ID <= 0xff.
                 */
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;

                if (!kvm_apic_sw_enabled(apic))
                        continue;

                ldr = kvm_lapic_get_reg(apic, APIC_LDR);

                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
                } else if (ldr) {
                        ldr = GET_APIC_LOGICAL_ID(ldr);
                        if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                                new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
                        else
                                new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                }

                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                        continue;

                if (mask)
                        cluster[ffs(mask) - 1] = apic;
        }
out:
        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_key_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_key_slow_inc(&apic_sw_disabled.key);

                recalculate_apic_map(apic->vcpu->kvm);
        }
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}
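
/*
 * Worked example (illustrative): for x2APIC ID 0x23 the logical ID is
 * ((0x23 >> 4) << 16) | (1 << (0x23 & 0xf)) = 0x00020008, i.e. cluster 2
 * with bit 3 set, matching the decode in kvm_apic_map_get_logical_dest().
 */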

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        recalculate_apic_map(apic->vcpu->kvm);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
{
        return kvm_lapic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct kvm_cpuid_entry2 *feat;
        u32 v = APIC_VERSION;

        if (!lapic_in_kernel(vcpu))
                return;

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register.  Some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
         * in the lapic without checking the IOAPIC version first, so
         * level-triggered interrupts would never get EOIed in the IOAPIC.
         */
        feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
        if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
        LVT_MASK,       /* partial LVTT mask, timer mode mask added at runtime */
        LVT_MASK | APIC_MODE_MASK,      /* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,      /* LVTPC */
        LINT_MASK, LINT_MASK,   /* LVT0-1 */
        LVT_MASK                /* LVTERR */
};

static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
                irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
                        prev_irr_val = irr_val;
                        irr_val |= xchg(&pir[i], 0);
                        *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                        if (prev_irr_val != irr_val) {
                                max_updated_irr =
                                        __fls(irr_val ^ prev_irr_val) + vec;
                        }
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
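
/*
 * Worked example (illustrative): if pir[1] = 0x00010000 (vector 48) and
 * all other PIR and IRR words are zero, xchg() atomically claims the PIR
 * bits, the matching IRR word becomes 0x00010000, and both
 * max_updated_irr and *max_irr end up as __fls(0x10000) + 32 = 48, so
 * the function returns true (the freshly posted vector is the highest).
 */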

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint. It will always be true
         * with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        vcpu = apic->vcpu;

        if (unlikely(vcpu->arch.apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                kvm_x86_ops->hwapic_irr_update(vcpu,
                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu, vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * The ISR (in-service register) bit is set when injecting
                 * an interrupt, and the highest vector is injected, so the
                 * latest bit set matches the highest bit in the ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(vcpu->arch.apicv_active))
                kvm_x86_ops->hwapic_isr_update(vcpu,
                                               apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and
         * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
         * will cause vmexit immediately and the value will be recalculated
         * on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                        irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
                         struct kvm_lapic_irq *irq, u32 min)
{
        int i, count = 0;
        struct kvm_vcpu *vcpu;

        if (min > map->max_apic_id)
                return 0;

        for_each_set_bit(i, ipi_bitmap,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, irq, NULL);
                }
        }

        return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        struct kvm_apic_map *map;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count;

        if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
                return -KVM_EINVAL;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        count = -EOPNOTSUPP;
        if (likely(map)) {
                count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
                min += cluster_size;
                count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
        }

        rcu_read_unlock();
        return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0)
                printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
                       (unsigned long long)vcpu->arch.pv_eoi.msr_val);
        return val & 0x1;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
                printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
                       (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
                printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
                       (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (apic->vcpu->arch.apicv_active)
                highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}
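
/*
 * Worked example (illustrative): with TPR = 0x30 and highest in-service
 * vector 0x41, isrv & 0xf0 = 0x40 > tpr & 0xf0 = 0x30, so PPR becomes
 * 0x40; with TPR = 0x50 instead, PPR would be the full TPR value 0x50.
 */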

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        if (apic_x2apic_mode(apic))
                return mda == kvm_x2apic_id(apic);

        /*
         * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
         * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
         * this allows unique addressing of VCPUs with APIC ID over 0xff.
         * The 0xff condition is needed because the xAPIC ID is writeable.
         */
        if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
                return true;

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                return false;
        }
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int shorthand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        ASSERT(target);
        switch (shorthand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                       const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}
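
/*
 * Worked example (illustrative): with vector 0x31 (49) and 4 candidate
 * destinations, mod = 49 % 4 = 1, so the loop walks past one set bit and
 * returns the index of the second set bit in *bitmap.  This is the
 * "vector % number of destinations" hashing described below in
 * kvm_intr_is_single_vcpu_fast()'s comment.
 */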

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                printk(KERN_INFO
                       "Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                                map->mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/* Return true if the interrupt can be handled by using *bitmap as an index
 * mask for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *      1. For lowest-priority interrupts, store all the possible
 *         destination vCPUs in an array.
 *      2. Use "guest vector % max number of destination vCPUs" to find
 *         the right destination vCPU in the array for the lowest-priority
 *         interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into the lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                /* fall through */
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector,
                                                     apic->regs + APIC_TMR);
                        else
                                kvm_lapic_clear_vector(vector,
                                                       apic->regs + APIC_TMR);
                }

                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
                else {
                        kvm_lapic_set_irr(vector, apic);

                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        /*
                         * make sure pending_events is visible before sending
                         * the request
                         */
                        smp_wmb();
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_STARTUP:
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap, or it traverses
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap)
{
        struct kvm_lapic **dest_vcpu = NULL;
        struct kvm_lapic *src = NULL;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        unsigned long bitmap;
        int i, vcpu_idx;
        bool ret;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
                                          &bitmap);
        if (ret) {
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dest_vcpu[i])
                                continue;
                        vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
                        __set_bit(vcpu_idx, vcpu_bitmap);
                }
        } else {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!kvm_apic_present(vcpu))
                                continue;
                        if (!kvm_apic_match_dest(vcpu, NULL,
                                                 irq->shorthand,
                                                 irq->dest_id,
                                                 irq->dest_mode))
                                continue;
                        __set_bit(i, vcpu_bitmap);
                }
        }
        rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Eoi the ioapic only if the ioapic doesn't own the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every write to EOI has a corresponding ISR bit set; one
         * example is when the kernel checks the timer in setup_IO_APIC.
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

static void apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
                apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                         (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}
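
/*
 * Worked example (illustrative; assumes APIC_BUS_CYCLE_NS from lapic.h is
 * 1 ns): with 100000 ns remaining until target_expiration and a
 * divide_count of 2, TMCCT = 100000 / (1 * 2) = 50000 bus cycles left
 * before the next expiration.
 */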

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                break;

        case APIC_TMCCT:        /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                /* fall through */
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)      (1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
        (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
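
/*
 * Worked example (illustrative; assumes APIC_ISR is offset 0x100 and
 * APIC_ISR_NR is 8, per the headers): APIC_REG_MASK(APIC_ISR) =
 * 1ull << (0x100 >> 4) = bit 16, and APIC_REGS_MASK(APIC_ISR, 8) =
 * (1ull << 16) * 0xff = bits 16-23, covering all eight 32-bit ISR
 * registers in one 64-bit validity mask.
 */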

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        u64 valid_reg_mask =
                APIC_REG_MASK(APIC_ID) |
                APIC_REG_MASK(APIC_LVR) |
                APIC_REG_MASK(APIC_TASKPRI) |
                APIC_REG_MASK(APIC_PROCPRI) |
                APIC_REG_MASK(APIC_LDR) |
                APIC_REG_MASK(APIC_DFR) |
                APIC_REG_MASK(APIC_SPIV) |
                APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
                APIC_REG_MASK(APIC_ESR) |
                APIC_REG_MASK(APIC_ICR) |
                APIC_REG_MASK(APIC_ICR2) |
                APIC_REG_MASK(APIC_LVTT) |
                APIC_REG_MASK(APIC_LVTTHMR) |
                APIC_REG_MASK(APIC_LVTPC) |
                APIC_REG_MASK(APIC_LVT0) |
                APIC_REG_MASK(APIC_LVT1) |
                APIC_REG_MASK(APIC_LVTERR) |
                APIC_REG_MASK(APIC_TMICT) |
                APIC_REG_MASK(APIC_TMCCT) |
                APIC_REG_MASK(APIC_TDCR);

        /* ARBPRI is not valid on x2APIC */
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);

        if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
                return 1;

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return addr >= apic->base_address &&
                addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                           gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                memset(data, 0xff, len);
                return 0;
        }

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);
}
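
/*
 * Worked example (illustrative): the divide value is encoded in TDCR
 * bits 3 and 1:0.  TDCR = 0x0 gives tmp2 = 1, so divide_count = 2
 * (divide by 2); TDCR = 0xb gives tmp2 = 8, so divide_count =
 * 1 << (8 & 7) = 1 (divide by 1), matching the SDM encoding.
 */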

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
        /*
         * Do not allow the guest to program periodic timers with small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_ratelimited(
                            "kvm: vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
                        apic->lapic_timer.timer_mode_mask;

        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
                }
                apic->lapic_timer.timer_mode = timer_mode;
                limit_periodic_timer_frequency(apic);
        }
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

        if (kvm_apic_hw_enabled(apic)) {
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;

                if (vcpu->arch.apicv_active)
                        bitmap = apic->regs + APIC_IRR;

                if (apic_test_vector(vec, bitmap))
                        return true;
        }
        return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
        u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

        /*
         * If the guest TSC is running at a different ratio than the host, then
         * convert the delay to nanoseconds to achieve an accurate delay.  Note
         * that __delay() uses delay_tsc whenever the hardware has TSC, thus
         * always for VMX enabled hardware.
         */
        if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
                __delay(min(guest_cycles,
                        nsec_to_cycles(vcpu, timer_advance_ns)));
        } else {
                u64 delay_ns = guest_cycles * 1000000ULL;

                do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
                ndelay(min_t(u32, delay_ns, timer_advance_ns));
        }
}
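
/*
 * Worked example (illustrative): cycles * 10^6 / tsc_khz converts guest
 * TSC cycles to nanoseconds.  With virtual_tsc_khz = 3000000 (a 3 GHz
 * guest TSC) and guest_cycles = 3000000, delay_ns =
 * 3000000 * 1000000 / 3000000 = 1000000 ns, i.e. a 1 ms delay.
 */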

static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
                                              s64 advance_expire_delta)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
        u64 ns;

        /* Do not adjust for tiny fluctuations or large random spikes. */
        if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
            abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
                return;

        /* too early */
        if (advance_expire_delta < 0) {
                ns = -advance_expire_delta * 1000000ULL;
                do_div(ns, vcpu->arch.virtual_tsc_khz);
                timer_advance_ns -= ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
        } else {
                /* too late */
                ns = advance_expire_delta * 1000000ULL;
                do_div(ns, vcpu->arch.virtual_tsc_khz);
                timer_advance_ns += ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP;
        }

        if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
                timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
        apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
1541
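/*
 * Compute how far the guest TSC is from the expired deadline; if the timer
 * fired early, burn the remaining cycles so the interrupt lands as close
 * to the programmed deadline as possible, then feed the observed delta
 * back into the dynamic advance adjustment.
 */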
1542 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1543 {
1544         struct kvm_lapic *apic = vcpu->arch.apic;
1545         u64 guest_tsc, tsc_deadline;
1546
1547         if (apic->lapic_timer.expired_tscdeadline == 0)
1548                 return;
1549
1550         tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1551         apic->lapic_timer.expired_tscdeadline = 0;
1552         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1553         apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1554
1555         if (guest_tsc < tsc_deadline)
1556                 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1557
1558         if (lapic_timer_advance_dynamic)
1559                 adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1560 }
1561
1562 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1563 {
1564         if (lapic_timer_int_injected(vcpu))
1565                 __kvm_wait_lapic_expire(vcpu);
1566 }
1567 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1568
1569 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1570 {
1571         struct kvm_timer *ktimer = &apic->lapic_timer;
1572
1573         kvm_apic_local_deliver(apic, APIC_LVTT);
1574         if (apic_lvtt_tscdeadline(apic)) {
1575                 ktimer->tscdeadline = 0;
1576         } else if (apic_lvtt_oneshot(apic)) {
1577                 ktimer->tscdeadline = 0;
1578                 ktimer->target_expiration = 0;
1579         }
1580 }
1581
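/*
 * Common expiry path for all timer modes: either deliver the interrupt
 * immediately (posted-interrupt case) or mark it pending and kick the
 * vCPU to pick it up on the next entry.
 */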
1582 static void apic_timer_expired(struct kvm_lapic *apic)
1583 {
1584         struct kvm_vcpu *vcpu = apic->vcpu;
1585         struct kvm_timer *ktimer = &apic->lapic_timer;
1586
1587         if (atomic_read(&apic->lapic_timer.pending))
1588                 return;
1589
1590         if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1591                 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1592
1593         if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1594                 if (apic->lapic_timer.timer_advance_ns)
1595                         __kvm_wait_lapic_expire(vcpu);
1596                 kvm_apic_inject_pending_timer_irqs(apic);
1597                 return;
1598         }
1599
1600         atomic_inc(&apic->lapic_timer.pending);
1601         kvm_set_pending_timer(vcpu);
1602 }
1603
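/*
 * Arm the software hrtimer for TSC-deadline mode: convert the remaining
 * guest TSC cycles to nanoseconds, expire early by timer_advance_ns, and
 * fire immediately if the deadline has already passed.
 */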
1604 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1605 {
1606         struct kvm_timer *ktimer = &apic->lapic_timer;
1607         u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1608         u64 ns = 0;
1609         ktime_t expire;
1610         struct kvm_vcpu *vcpu = apic->vcpu;
1611         unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1612         unsigned long flags;
1613         ktime_t now;
1614
1615         if (unlikely(!tscdeadline || !this_tsc_khz))
1616                 return;
1617
1618         local_irq_save(flags);
1619
1620         now = ktime_get();
1621         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1622
1623         ns = (tscdeadline - guest_tsc) * 1000000ULL;
1624         do_div(ns, this_tsc_khz);
1625
1626         if (likely(tscdeadline > guest_tsc) &&
1627             likely(ns > apic->lapic_timer.timer_advance_ns)) {
1628                 expire = ktime_add_ns(now, ns);
1629                 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1630                 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1631         } else
1632                 apic_timer_expired(apic);
1633
1634         local_irq_restore(flags);
1635 }
1636
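/*
 * The divide configuration changed while the timer was running: rescale
 * the time remaining by the new/old divisor ratio and recompute both the
 * hrtimer target and the TSC deadline to match.
 */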
1637 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1638 {
1639         ktime_t now, remaining;
1640         u64 ns_remaining_old, ns_remaining_new;
1641
1642         apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1643                 * APIC_BUS_CYCLE_NS * apic->divide_count;
1644         limit_periodic_timer_frequency(apic);
1645
1646         now = ktime_get();
1647         remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1648         if (ktime_to_ns(remaining) < 0)
1649                 remaining = 0;
1650
1651         ns_remaining_old = ktime_to_ns(remaining);
1652         ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1653                                            apic->divide_count, old_divisor);
1654
1655         apic->lapic_timer.tscdeadline +=
1656                 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1657                 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1658         apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1659 }
1660
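/*
 * Program a fresh expiration from the initial count: period = TMICT *
 * bus cycle * divider.  Returns false, with the deadline cleared, when
 * TMICT is zero and the timer is effectively off.
 */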
1661 static bool set_target_expiration(struct kvm_lapic *apic)
1662 {
1663         ktime_t now;
1664         u64 tscl = rdtsc();
1665
1666         now = ktime_get();
1667         apic->lapic_timer.period = (u64)kvm_lapic_get_reg(apic, APIC_TMICT)
1668                 * APIC_BUS_CYCLE_NS * apic->divide_count;
1669
1670         if (!apic->lapic_timer.period) {
1671                 apic->lapic_timer.tscdeadline = 0;
1672                 return false;
1673         }
1674
1675         limit_periodic_timer_frequency(apic);
1676
1677         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1678                 nsec_to_cycles(apic->vcpu, apic->lapic_timer.period);
1679         apic->lapic_timer.target_expiration = ktime_add_ns(now, apic->lapic_timer.period);
1680
1681         return true;
1682 }
1683
1684 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1685 {
1686         ktime_t now = ktime_get();
1687         u64 tscl = rdtsc();
1688         ktime_t delta;
1689
1690         /*
1691          * Synchronize both deadlines to the same time source or
1692          * differences in the periods (caused by differences in the
1693          * underlying clocks or numerical approximation errors) will
1694          * cause the two to drift apart over time as the errors
1695          * accumulate.
1696          */
1697         apic->lapic_timer.target_expiration =
1698                 ktime_add_ns(apic->lapic_timer.target_expiration,
1699                                 apic->lapic_timer.period);
1700         delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1701         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1702                 nsec_to_cycles(apic->vcpu, delta);
1703 }
1704
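/*
 * Start the software hrtimer for one-shot and periodic modes.  If the
 * target expiration is already in the past, deliver the missed tick first
 * and, for periodic mode, roll forward to the next period.
 */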
1705 static void start_sw_period(struct kvm_lapic *apic)
1706 {
1707         if (!apic->lapic_timer.period)
1708                 return;
1709
1710         if (ktime_after(ktime_get(),
1711                         apic->lapic_timer.target_expiration)) {
1712                 apic_timer_expired(apic);
1713
1714                 if (apic_lvtt_oneshot(apic))
1715                         return;
1716
1717                 advance_periodic_target_expiration(apic);
1718         }
1719
1720         hrtimer_start(&apic->lapic_timer.timer,
1721                 apic->lapic_timer.target_expiration,
1722                 HRTIMER_MODE_ABS);
1723 }
1724
1725 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1726 {
1727         if (!lapic_in_kernel(vcpu))
1728                 return false;
1729
1730         return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1731 }
1732 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1733
1734 static void cancel_hv_timer(struct kvm_lapic *apic)
1735 {
1736         WARN_ON(preemptible());
1737         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1738         kvm_x86_ops->cancel_hv_timer(apic->vcpu);
1739         apic->lapic_timer.hv_timer_in_use = false;
1740 }
1741
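/*
 * Try to offload the deadline to a hardware timer (on Intel, the VMX
 * preemption timer).  Returns false if the hook is absent, no deadline is
 * armed, or programming it failed; the caller then falls back to the
 * software hrtimer.
 */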
1742 static bool start_hv_timer(struct kvm_lapic *apic)
1743 {
1744         struct kvm_timer *ktimer = &apic->lapic_timer;
1745         struct kvm_vcpu *vcpu = apic->vcpu;
1746         bool expired;
1747
1748         WARN_ON(preemptible());
1749         if (!kvm_x86_ops->set_hv_timer)
1750                 return false;
1751
1752         if (!ktimer->tscdeadline)
1753                 return false;
1754
1755         if (kvm_x86_ops->set_hv_timer(vcpu, ktimer->tscdeadline, &expired))
1756                 return false;
1757
1758         ktimer->hv_timer_in_use = true;
1759         hrtimer_cancel(&ktimer->timer);
1760
1761         /*
1762          * To simplify handling the periodic timer, leave the hv timer running
1763          * even if the deadline timer has expired, i.e. rely on the resulting
1764          * VM-Exit to recompute the periodic timer's target expiration.
1765          */
1766         if (!apic_lvtt_period(apic)) {
1767                 /*
1768                  * Cancel the hv timer if the sw timer fired while the hv timer
1769                  * was being programmed, or if the hv timer itself expired.
1770                  */
1771                 if (atomic_read(&ktimer->pending)) {
1772                         cancel_hv_timer(apic);
1773                 } else if (expired) {
1774                         apic_timer_expired(apic);
1775                         cancel_hv_timer(apic);
1776                 }
1777         }
1778
1779         trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1780
1781         return true;
1782 }
1783
1784 static void start_sw_timer(struct kvm_lapic *apic)
1785 {
1786         struct kvm_timer *ktimer = &apic->lapic_timer;
1787
1788         WARN_ON(preemptible());
1789         if (apic->lapic_timer.hv_timer_in_use)
1790                 cancel_hv_timer(apic);
1791         if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1792                 return;
1793
1794         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1795                 start_sw_period(apic);
1796         else if (apic_lvtt_tscdeadline(apic))
1797                 start_sw_tscdeadline(apic);
1798         trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1799 }
1800
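/*
 * (Re)arm the timer, preferring the hardware timer over the hrtimer.  Do
 * nothing for a non-periodic timer that already has an expiry pending, as
 * re-arming it could generate a spurious tick.
 */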
1801 static void restart_apic_timer(struct kvm_lapic *apic)
1802 {
1803         preempt_disable();
1804
1805         if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1806                 goto out;
1807
1808         if (!start_hv_timer(apic))
1809                 start_sw_timer(apic);
1810 out:
1811         preempt_enable();
1812 }
1813
1814 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1815 {
1816         struct kvm_lapic *apic = vcpu->arch.apic;
1817
1818         preempt_disable();
1819         /* If the preempt notifier has already run, it also called apic_timer_expired */
1820         if (!apic->lapic_timer.hv_timer_in_use)
1821                 goto out;
1822         WARN_ON(swait_active(&vcpu->wq));
1823         cancel_hv_timer(apic);
1824         apic_timer_expired(apic);
1825
1826         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1827                 advance_periodic_target_expiration(apic);
1828                 restart_apic_timer(apic);
1829         }
1830 out:
1831         preempt_enable();
1832 }
1833 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1834
1835 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1836 {
1837         restart_apic_timer(vcpu->arch.apic);
1838 }
1839 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1840
1841 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1842 {
1843         struct kvm_lapic *apic = vcpu->arch.apic;
1844
1845         preempt_disable();
1846         /* Possibly the TSC deadline timer is not enabled yet */
1847         if (apic->lapic_timer.hv_timer_in_use)
1848                 start_sw_timer(apic);
1849         preempt_enable();
1850 }
1851 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1852
1853 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1854 {
1855         struct kvm_lapic *apic = vcpu->arch.apic;
1856
1857         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1858         restart_apic_timer(apic);
1859 }
1860
1861 static void start_apic_timer(struct kvm_lapic *apic)
1862 {
1863         atomic_set(&apic->lapic_timer.pending, 0);
1864
1865         if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1866             && !set_target_expiration(apic))
1867                 return;
1868
1869         restart_apic_timer(apic);
1870 }
1871
1872 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1873 {
1874         bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1875
1876         if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1877                 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1878                 if (lvt0_in_nmi_mode) {
1879                         atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1880                 } else
1881                         atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1882         }
1883 }
1884
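/*
 * Handle a guest write to a local APIC register, whatever the access path
 * (MMIO, x2APIC MSR, or trap-style write).  Returns 0 on success and 1
 * for registers that are reserved or invalid in the current APIC mode.
 */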
1885 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1886 {
1887         int ret = 0;
1888
1889         trace_kvm_apic_write(reg, val);
1890
1891         switch (reg) {
1892         case APIC_ID:           /* Local APIC ID */
1893                 if (!apic_x2apic_mode(apic))
1894                         kvm_apic_set_xapic_id(apic, val >> 24);
1895                 else
1896                         ret = 1;
1897                 break;
1898
1899         case APIC_TASKPRI:
1900                 report_tpr_access(apic, true);
1901                 apic_set_tpr(apic, val & 0xff);
1902                 break;
1903
1904         case APIC_EOI:
1905                 apic_set_eoi(apic);
1906                 break;
1907
1908         case APIC_LDR:
1909                 if (!apic_x2apic_mode(apic))
1910                         kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
1911                 else
1912                         ret = 1;
1913                 break;
1914
1915         case APIC_DFR:
1916                 if (!apic_x2apic_mode(apic)) {
1917                         kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
1918                         recalculate_apic_map(apic->vcpu->kvm);
1919                 } else
1920                         ret = 1;
1921                 break;
1922
1923         case APIC_SPIV: {
1924                 u32 mask = 0x3ff;
1925                 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
1926                         mask |= APIC_SPIV_DIRECTED_EOI;
1927                 apic_set_spiv(apic, val & mask);
1928                 if (!(val & APIC_SPIV_APIC_ENABLED)) {
1929                         int i;
1930                         u32 lvt_val;
1931
1932                         for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
1933                                 lvt_val = kvm_lapic_get_reg(apic,
1934                                                        APIC_LVTT + 0x10 * i);
1935                                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
1936                                              lvt_val | APIC_LVT_MASKED);
1937                         }
1938                         apic_update_lvtt(apic);
1939                         atomic_set(&apic->lapic_timer.pending, 0);
1940
1941                 }
1942                 break;
1943         }
1944         case APIC_ICR:
1945                 /* No delay here, so we always clear the pending bit */
1946                 val &= ~(1 << 12);
1947                 apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
1948                 kvm_lapic_set_reg(apic, APIC_ICR, val);
1949                 break;
1950
1951         case APIC_ICR2:
1952                 if (!apic_x2apic_mode(apic))
1953                         val &= 0xff000000;
1954                 kvm_lapic_set_reg(apic, APIC_ICR2, val);
1955                 break;
1956
1957         case APIC_LVT0:
1958                 apic_manage_nmi_watchdog(apic, val);
1959                 /* fall through */
1960         case APIC_LVTTHMR:
1961         case APIC_LVTPC:
1962         case APIC_LVT1:
1963         case APIC_LVTERR: {
1964                 /* TODO: Check vector */
1965                 size_t size;
1966                 u32 index;
1967
1968                 if (!kvm_apic_sw_enabled(apic))
1969                         val |= APIC_LVT_MASKED;
1970                 size = ARRAY_SIZE(apic_lvt_mask);
1971                 index = array_index_nospec(
1972                                 (reg - APIC_LVTT) >> 4, size);
1973                 val &= apic_lvt_mask[index];
1974                 kvm_lapic_set_reg(apic, reg, val);
1975                 break;
1976         }
1977
1978         case APIC_LVTT:
1979                 if (!kvm_apic_sw_enabled(apic))
1980                         val |= APIC_LVT_MASKED;
1981                 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
1982                 kvm_lapic_set_reg(apic, APIC_LVTT, val);
1983                 apic_update_lvtt(apic);
1984                 break;
1985
1986         case APIC_TMICT:
1987                 if (apic_lvtt_tscdeadline(apic))
1988                         break;
1989
1990                 hrtimer_cancel(&apic->lapic_timer.timer);
1991                 kvm_lapic_set_reg(apic, APIC_TMICT, val);
1992                 start_apic_timer(apic);
1993                 break;
1994
1995         case APIC_TDCR: {
1996                 uint32_t old_divisor = apic->divide_count;
1997
1998                 kvm_lapic_set_reg(apic, APIC_TDCR, val);
1999                 update_divide_count(apic);
2000                 if (apic->divide_count != old_divisor &&
2001                                 apic->lapic_timer.period) {
2002                         hrtimer_cancel(&apic->lapic_timer.timer);
2003                         update_target_expiration(apic, old_divisor);
2004                         restart_apic_timer(apic);
2005                 }
2006                 break;
2007         }
2008         case APIC_ESR:
2009                 if (apic_x2apic_mode(apic) && val != 0)
2010                         ret = 1;
2011                 break;
2012
2013         case APIC_SELF_IPI:
2014                 if (apic_x2apic_mode(apic)) {
2015                         kvm_lapic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
2016                 } else
2017                         ret = 1;
2018                 break;
2019         default:
2020                 ret = 1;
2021                 break;
2022         }
2023
2024         return ret;
2025 }
2026 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
2027
2028 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2029                             gpa_t address, int len, const void *data)
2030 {
2031         struct kvm_lapic *apic = to_lapic(this);
2032         unsigned int offset = address - apic->base_address;
2033         u32 val;
2034
2035         if (!apic_mmio_in_range(apic, address))
2036                 return -EOPNOTSUPP;
2037
2038         if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2039                 if (!kvm_check_has_quirk(vcpu->kvm,
2040                                          KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2041                         return -EOPNOTSUPP;
2042
2043                 return 0;
2044         }
2045
2046         /*
2047          * APIC registers are aligned on 128-bit boundaries, and
2048          * 32/64/128-bit registers must be accessed through 32-bit
2049          * loads and stores; refer to SDM 8.4.1.
2050          */
2051         if (len != 4 || (offset & 0xf))
2052                 return 0;
2053
2054         val = *(u32*)data;
2055
2056         kvm_lapic_reg_write(apic, offset & 0xff0, val);
2057
2058         return 0;
2059 }
2060
2061 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2062 {
2063         kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2064 }
2065 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2066
2067 /* Emulate an APIC access handled as a trap, i.e. after the access completed. */
2068 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2069 {
2070         u32 val = 0;
2071
2072         /* Hardware has already done the access check and instruction decode. */
2073         offset &= 0xff0;
2074
2075         kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2076
2077         /* TODO: optimize to just emulate side effect w/o one more write */
2078         kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2079 }
2080 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2081
2082 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2083 {
2084         struct kvm_lapic *apic = vcpu->arch.apic;
2085
2086         if (!vcpu->arch.apic)
2087                 return;
2088
2089         hrtimer_cancel(&apic->lapic_timer.timer);
2090
2091         if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2092                 static_key_slow_dec_deferred(&apic_hw_disabled);
2093
2094         if (!apic->sw_enabled)
2095                 static_key_slow_dec_deferred(&apic_sw_disabled);
2096
2097         if (apic->regs)
2098                 free_page((unsigned long)apic->regs);
2099
2100         kfree(apic);
2101 }
2102
2103 /*
2104  *----------------------------------------------------------------------
2105  * LAPIC interface
2106  *----------------------------------------------------------------------
2107  */
2108 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2109 {
2110         struct kvm_lapic *apic = vcpu->arch.apic;
2111
2112         if (!lapic_in_kernel(vcpu) ||
2113                 !apic_lvtt_tscdeadline(apic))
2114                 return 0;
2115
2116         return apic->lapic_timer.tscdeadline;
2117 }
2118
2119 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2120 {
2121         struct kvm_lapic *apic = vcpu->arch.apic;
2122
2123         if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
2124                         apic_lvtt_period(apic))
2125                 return;
2126
2127         hrtimer_cancel(&apic->lapic_timer.timer);
2128         apic->lapic_timer.tscdeadline = data;
2129         start_apic_timer(apic);
2130 }
2131
2132 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2133 {
2134         struct kvm_lapic *apic = vcpu->arch.apic;
2135
2136         apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2137                      | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2138 }
2139
2140 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2141 {
2142         u64 tpr;
2143
2144         tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2145
2146         return (tpr & 0xf0) >> 4;
2147 }
2148
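/*
 * Process a write to the APIC base MSR: flip the hw-enabled jump label,
 * latch the x2APIC ID when x2APIC is switched on, let vendor code react
 * to mode changes, and warn once if the guest relocates the APIC base,
 * which KVM does not support.
 */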
2149 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2150 {
2151         u64 old_value = vcpu->arch.apic_base;
2152         struct kvm_lapic *apic = vcpu->arch.apic;
2153
2154         if (!apic)
2155                 value |= MSR_IA32_APICBASE_BSP;
2156
2157         vcpu->arch.apic_base = value;
2158
2159         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2160                 kvm_update_cpuid(vcpu);
2161
2162         if (!apic)
2163                 return;
2164
2165         /* update jump label if enable bit changes */
2166         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2167                 if (value & MSR_IA32_APICBASE_ENABLE) {
2168                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2169                         static_key_slow_dec_deferred(&apic_hw_disabled);
2170                 } else {
2171                         static_key_slow_inc(&apic_hw_disabled.key);
2172                         recalculate_apic_map(vcpu->kvm);
2173                 }
2174         }
2175
2176         if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2177                 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2178
2179         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2180                 kvm_x86_ops->set_virtual_apic_mode(vcpu);
2181
2182         apic->base_address = apic->vcpu->arch.apic_base &
2183                              MSR_IA32_APICBASE_BASE;
2184
2185         if ((value & MSR_IA32_APICBASE_ENABLE) &&
2186              apic->base_address != APIC_DEFAULT_PHYS_BASE)
2187                 pr_warn_once("APIC base relocation is unsupported by KVM");
2188 }
2189
2190 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2191 {
2192         struct kvm_lapic *apic = vcpu->arch.apic;
2193
2194         if (vcpu->arch.apicv_active) {
2195                 /* irr_pending is always true when apicv is activated. */
2196                 apic->irr_pending = true;
2197                 apic->isr_count = 1;
2198         } else {
2199                 apic->irr_pending = (apic_search_irr(apic) != -1);
2200                 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2201         }
2202 }
2203 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2204
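/*
 * Return the APIC to its power-up register state.  An INIT (init_event)
 * preserves the APIC base and ID; a full reset also restores those to
 * their architectural defaults.
 */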
2205 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2206 {
2207         struct kvm_lapic *apic = vcpu->arch.apic;
2208         int i;
2209
2210         if (!apic)
2211                 return;
2212
2213         /* Stop the timer in case it's a reset of an active APIC. */
2214         hrtimer_cancel(&apic->lapic_timer.timer);
2215
2216         if (!init_event) {
2217                 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2218                                          MSR_IA32_APICBASE_ENABLE);
2219                 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2220         }
2221         kvm_apic_set_version(apic->vcpu);
2222
2223         for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2224                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2225         apic_update_lvtt(apic);
2226         if (kvm_vcpu_is_reset_bsp(vcpu) &&
2227             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2228                 kvm_lapic_set_reg(apic, APIC_LVT0,
2229                              SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2230         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2231
2232         kvm_lapic_set_reg(apic, APIC_DFR, 0xffffffffU);
2233         apic_set_spiv(apic, 0xff);
2234         kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2235         if (!apic_x2apic_mode(apic))
2236                 kvm_apic_set_ldr(apic, 0);
2237         kvm_lapic_set_reg(apic, APIC_ESR, 0);
2238         kvm_lapic_set_reg(apic, APIC_ICR, 0);
2239         kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2240         kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2241         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2242         for (i = 0; i < 8; i++) {
2243                 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2244                 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2245                 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2246         }
2247         kvm_apic_update_apicv(vcpu);
2248         apic->highest_isr_cache = -1;
2249         update_divide_count(apic);
2250         atomic_set(&apic->lapic_timer.pending, 0);
2251         if (kvm_vcpu_is_bsp(vcpu))
2252                 kvm_lapic_set_base(vcpu,
2253                                 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2254         vcpu->arch.pv_eoi.msr_val = 0;
2255         apic_update_ppr(apic);
2256         if (vcpu->arch.apicv_active) {
2257                 kvm_x86_ops->apicv_post_state_restore(vcpu);
2258                 kvm_x86_ops->hwapic_irr_update(vcpu, -1);
2259                 kvm_x86_ops->hwapic_isr_update(vcpu, -1);
2260         }
2261
2262         vcpu->arch.apic_arb_prio = 0;
2263         vcpu->arch.apic_attention = 0;
2264 }
2265
2266 /*
2267  *----------------------------------------------------------------------
2268  * timer interface
2269  *----------------------------------------------------------------------
2270  */
2271
2272 static bool lapic_is_periodic(struct kvm_lapic *apic)
2273 {
2274         return apic_lvtt_period(apic);
2275 }
2276
2277 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2278 {
2279         struct kvm_lapic *apic = vcpu->arch.apic;
2280
2281         if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2282                 return atomic_read(&apic->lapic_timer.pending);
2283
2284         return 0;
2285 }
2286
2287 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2288 {
2289         u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2290         int vector, mode, trig_mode;
2291
2292         if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2293                 vector = reg & APIC_VECTOR_MASK;
2294                 mode = reg & APIC_MODE_MASK;
2295                 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2296                 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2297                                         NULL);
2298         }
2299         return 0;
2300 }
2301
2302 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2303 {
2304         struct kvm_lapic *apic = vcpu->arch.apic;
2305
2306         if (apic)
2307                 kvm_apic_local_deliver(apic, APIC_LVT0);
2308 }
2309
2310 static const struct kvm_io_device_ops apic_mmio_ops = {
2311         .read     = apic_mmio_read,
2312         .write    = apic_mmio_write,
2313 };
2314
2315 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2316 {
2317         struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2318         struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2319
2320         apic_timer_expired(apic);
2321
2322         if (lapic_is_periodic(apic)) {
2323                 advance_periodic_target_expiration(apic);
2324                 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2325                 return HRTIMER_RESTART;
2326         } else
2327                 return HRTIMER_NORESTART;
2328 }
2329
2330 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2331 {
2332         struct kvm_lapic *apic;
2333
2334         ASSERT(vcpu != NULL);
2335
2336         apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2337         if (!apic)
2338                 goto nomem;
2339
2340         vcpu->arch.apic = apic;
2341
2342         apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2343         if (!apic->regs) {
2344                 printk(KERN_ERR "failed to allocate APIC register page for vcpu %x\n",
2345                        vcpu->vcpu_id);
2346                 goto nomem_free_apic;
2347         }
2348         apic->vcpu = vcpu;
2349
2350         hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2351                      HRTIMER_MODE_ABS_HARD);
2352         apic->lapic_timer.timer.function = apic_timer_fn;
2353         if (timer_advance_ns == -1) {
2354                 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2355                 lapic_timer_advance_dynamic = true;
2356         } else {
2357                 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2358                 lapic_timer_advance_dynamic = false;
2359         }
2360
2361         /*
2362          * APIC is created enabled. This will prevent kvm_lapic_set_base from
2363          * thinking that APIC state has changed.
2364          */
2365         vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2366         static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2367         kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2368
2369         return 0;
2370 nomem_free_apic:
2371         kfree(apic);
2372         vcpu->arch.apic = NULL;
2373 nomem:
2374         return -ENOMEM;
2375 }
2376
2377 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2378 {
2379         struct kvm_lapic *apic = vcpu->arch.apic;
2380         u32 ppr;
2381
2382         if (!kvm_apic_hw_enabled(apic))
2383                 return -1;
2384
2385         __apic_update_ppr(apic, &ppr);
2386         return apic_has_interrupt_for_ppr(apic, ppr);
2387 }
2388
2389 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2390 {
2391         u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2392
2393         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2394                 return 1;
2395         if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2396             GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2397                 return 1;
2398         return 0;
2399 }
2400
2401 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2402 {
2403         struct kvm_lapic *apic = vcpu->arch.apic;
2404
2405         if (atomic_read(&apic->lapic_timer.pending) > 0) {
2406                 kvm_apic_inject_pending_timer_irqs(apic);
2407                 atomic_set(&apic->lapic_timer.pending, 0);
2408         }
2409 }
2410
2411 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2412 {
2413         int vector = kvm_apic_has_interrupt(vcpu);
2414         struct kvm_lapic *apic = vcpu->arch.apic;
2415         u32 ppr;
2416
2417         if (vector == -1)
2418                 return -1;
2419
2420         /*
2421          * We get here even with APIC virtualization enabled, if doing
2422          * nested virtualization and L1 runs with the "acknowledge interrupt
2423          * on exit" mode.  Then we cannot inject the interrupt via RVI,
2424          * because the process would deliver it through the IDT.
2425          */
2426
2427         apic_clear_irr(vector, apic);
2428         if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
2429                 /*
2430                  * For auto-EOI interrupts, there might be another pending
2431                  * interrupt above PPR, so check whether to raise another
2432                  * KVM_REQ_EVENT.
2433                  */
2434                 apic_update_ppr(apic);
2435         } else {
2436                 /*
2437                  * For normal interrupts, PPR has been raised and there cannot
2438                  * be a higher-priority pending interrupt---except if there was
2439                  * a concurrent interrupt injection, but that would have
2440                  * triggered KVM_REQ_EVENT already.
2441                  */
2442                 apic_set_isr(vector, apic);
2443                 __apic_update_ppr(apic, &ppr);
2444         }
2445
2446         return vector;
2447 }
2448
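/*
 * Userspace keeps the x2APIC ID in the xAPIC register position unless the
 * KVM_CAP_X2APIC_API format is enabled, in which case the full 32-bit ID
 * must match the vcpu id.  Convert between the two layouts and, on set,
 * derive the read-only LDR from the ID.
 */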
2449 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2450                 struct kvm_lapic_state *s, bool set)
2451 {
2452         if (apic_x2apic_mode(vcpu->arch.apic)) {
2453                 u32 *id = (u32 *)(s->regs + APIC_ID);
2454                 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2455
2456                 if (vcpu->kvm->arch.x2apic_format) {
2457                         if (*id != vcpu->vcpu_id)
2458                                 return -EINVAL;
2459                 } else {
2460                         if (set)
2461                                 *id >>= 24;
2462                         else
2463                                 *id <<= 24;
2464                 }
2465
2466                 /* In x2APIC mode, the LDR is fixed and based on the id */
2467                 if (set)
2468                         *ldr = kvm_apic_calc_x2apic_ldr(*id);
2469         }
2470
2471         return 0;
2472 }
2473
2474 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2475 {
2476         memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2477         return kvm_apic_state_fixup(vcpu, s, false);
2478 }
2479
2480 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2481 {
2482         struct kvm_lapic *apic = vcpu->arch.apic;
2483         int r;
2484
2485
2486         kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2487         /* set SPIV separately to get count of SW disabled APICs right */
2488         apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2489
2490         r = kvm_apic_state_fixup(vcpu, s, true);
2491         if (r)
2492                 return r;
2493         memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2494
2495         recalculate_apic_map(vcpu->kvm);
2496         kvm_apic_set_version(vcpu);
2497
2498         apic_update_ppr(apic);
2499         hrtimer_cancel(&apic->lapic_timer.timer);
2500         apic_update_lvtt(apic);
2501         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2502         update_divide_count(apic);
2503         start_apic_timer(apic);
2504         kvm_apic_update_apicv(vcpu);
2505         apic->highest_isr_cache = -1;
2506         if (vcpu->arch.apicv_active) {
2507                 kvm_x86_ops->apicv_post_state_restore(vcpu);
2508                 kvm_x86_ops->hwapic_irr_update(vcpu,
2509                                 apic_find_highest_irr(apic));
2510                 kvm_x86_ops->hwapic_isr_update(vcpu,
2511                                 apic_find_highest_isr(apic));
2512         }
2513         kvm_make_request(KVM_REQ_EVENT, vcpu);
2514         if (ioapic_in_kernel(vcpu->kvm))
2515                 kvm_rtc_eoi_tracking_restore_one(vcpu);
2516
2517         vcpu->arch.apic_arb_prio = 0;
2518
2519         return 0;
2520 }
2521
2522 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2523 {
2524         struct hrtimer *timer;
2525
2526         if (!lapic_in_kernel(vcpu) ||
2527                 kvm_can_post_timer_interrupt(vcpu))
2528                 return;
2529
2530         timer = &vcpu->arch.apic->lapic_timer.timer;
2531         if (hrtimer_cancel(timer))
2532                 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2533 }
2534
2535 /*
2536  * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2537  *
2538  * Detect whether the guest triggered a PV EOI since the
2539  * last entry. If yes, perform the EOI on the guest's behalf.
2540  * Clear PV EOI in guest memory in any case.
2541  */
2542 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2543                                         struct kvm_lapic *apic)
2544 {
2545         bool pending;
2546         int vector;
2547         /*
2548          * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2549          * and KVM_PV_EOI_ENABLED in guest memory as follows:
2550          *
2551          * KVM_APIC_PV_EOI_PENDING is unset:
2552          *      -> host disabled PV EOI.
2553          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2554          *      -> host enabled PV EOI, guest did not execute EOI yet.
2555          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2556          *      -> host enabled PV EOI, guest executed EOI.
2557          */
2558         BUG_ON(!pv_eoi_enabled(vcpu));
2559         pending = pv_eoi_get_pending(vcpu);
2560         /*
2561          * Clear pending bit in any case: it will be set again on vmentry.
2562          * While this might not be ideal from performance point of view,
2563          * this makes sure pv eoi is only enabled when we know it's safe.
2564          */
2565         pv_eoi_clr_pending(vcpu);
2566         if (pending)
2567                 return;
2568         vector = apic_set_eoi(apic);
2569         trace_kvm_pv_eoi(apic, vector);
2570 }
2571
2572 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2573 {
2574         u32 data;
2575
2576         if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2577                 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2578
2579         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2580                 return;
2581
2582         if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2583                                   sizeof(u32)))
2584                 return;
2585
2586         apic_set_tpr(vcpu->arch.apic, data & 0xff);
2587 }
2588
2589 /*
2590  * apic_sync_pv_eoi_to_guest - called before vmentry
2591  *
2592  * Detect whether it's safe to enable PV EOI and
2593  * if yes do so.
2594  */
2595 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2596                                         struct kvm_lapic *apic)
2597 {
2598         if (!pv_eoi_enabled(vcpu) ||
2599             /* IRR set or many bits in ISR: could be nested. */
2600             apic->irr_pending ||
2601             /* Cache not set: could be safe but we don't bother. */
2602             apic->highest_isr_cache == -1 ||
2603             /* Need EOI to update ioapic. */
2604             kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2605                 /*
2606                  * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2607                  * so we need not do anything here.
2608                  */
2609                 return;
2610         }
2611
2612         pv_eoi_set_pending(apic->vcpu);
2613 }
2614
2615 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2616 {
2617         u32 data, tpr;
2618         int max_irr, max_isr;
2619         struct kvm_lapic *apic = vcpu->arch.apic;
2620
2621         apic_sync_pv_eoi_to_guest(vcpu, apic);
2622
2623         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2624                 return;
2625
2626         tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2627         max_irr = apic_find_highest_irr(apic);
2628         if (max_irr < 0)
2629                 max_irr = 0;
2630         max_isr = apic_find_highest_isr(apic);
2631         if (max_isr < 0)
2632                 max_isr = 0;
2633         data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2634
2635         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2636                                 sizeof(u32));
2637 }
2638
2639 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2640 {
2641         if (vapic_addr) {
2642                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2643                                         &vcpu->arch.apic->vapic_cache,
2644                                         vapic_addr, sizeof(u32)))
2645                         return -EINVAL;
2646                 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2647         } else {
2648                 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2649         }
2650
2651         vcpu->arch.apic->vapic_addr = vapic_addr;
2652         return 0;
2653 }
2654
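/*
 * Handle a WRMSR in the x2APIC range: the MSR index maps onto the legacy
 * register offset via (msr - APIC_BASE_MSR) << 4.  ICR is the only 64-bit
 * x2APIC register, so its high half is routed to ICR2 first.
 */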
2655 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2656 {
2657         struct kvm_lapic *apic = vcpu->arch.apic;
2658         u32 reg = (msr - APIC_BASE_MSR) << 4;
2659
2660         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2661                 return 1;
2662
2663         if (reg == APIC_ICR2)
2664                 return 1;
2665
2666         /* For an ICR write, set the destination (ICR2) before the command. */
2667         if (reg == APIC_ICR)
2668                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2669         return kvm_lapic_reg_write(apic, reg, (u32)data);
2670 }
2671
2672 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2673 {
2674         struct kvm_lapic *apic = vcpu->arch.apic;
2675         u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2676
2677         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2678                 return 1;
2679
2680         if (reg == APIC_DFR || reg == APIC_ICR2)
2681                 return 1;
2682
2683         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2684                 return 1;
2685         if (reg == APIC_ICR)
2686                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2687
2688         *data = (((u64)high) << 32) | low;
2689
2690         return 0;
2691 }
2692
2693 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2694 {
2695         struct kvm_lapic *apic = vcpu->arch.apic;
2696
2697         if (!lapic_in_kernel(vcpu))
2698                 return 1;
2699
2700         /* For an ICR write, set the destination (ICR2) before the command. */
2701         if (reg == APIC_ICR)
2702                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2703         return kvm_lapic_reg_write(apic, reg, (u32)data);
2704 }
2705
2706 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2707 {
2708         struct kvm_lapic *apic = vcpu->arch.apic;
2709         u32 low, high = 0;
2710
2711         if (!lapic_in_kernel(vcpu))
2712                 return 1;
2713
2714         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2715                 return 1;
2716         if (reg == APIC_ICR)
2717                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2718
2719         *data = (((u64)high) << 32) | low;
2720
2721         return 0;
2722 }
2723
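/*
 * MSR_KVM_PV_EOI_EN: bit 0 enables the feature, the remaining bits give
 * the address of the guest's 4-byte-aligned PV EOI flag word.  Keep the
 * existing cache length when the address is unchanged so a shorter write
 * does not shrink the mapping.
 */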
2724 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2725 {
2726         u64 addr = data & ~KVM_MSR_ENABLED;
2727         struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2728         unsigned long new_len;
2729
2730         if (!IS_ALIGNED(addr, 4))
2731                 return 1;
2732
2733         vcpu->arch.pv_eoi.msr_val = data;
2734         if (!pv_eoi_enabled(vcpu))
2735                 return 0;
2736
2737         if (addr == ghc->gpa && len <= ghc->len)
2738                 new_len = ghc->len;
2739         else
2740                 new_len = len;
2741
2742         return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2743 }
2744
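/*
 * Process latched INIT and SIPI events.  An INIT resets the vCPU and
 * leaves it in INIT_RECEIVED (RUNNABLE for the BSP); a following SIPI
 * supplies the start vector and makes the AP runnable.
 */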
2745 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2746 {
2747         struct kvm_lapic *apic = vcpu->arch.apic;
2748         u8 sipi_vector;
2749         unsigned long pe;
2750
2751         if (!lapic_in_kernel(vcpu) || !apic->pending_events)
2752                 return;
2753
2754         /*
2755          * INITs are latched while CPU is in specific states
2756          * (SMM, VMX non-root mode, SVM with GIF=0).
2757          * Because a CPU cannot be in these states immediately
2758          * after it has processed an INIT signal (and thus in
2759          * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2760          * and leave the INIT pending.
2761          */
2762         if (kvm_vcpu_latch_init(vcpu)) {
2763                 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2764                 if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
2765                         clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2766                 return;
2767         }
2768
2769         pe = xchg(&apic->pending_events, 0);
2770         if (test_bit(KVM_APIC_INIT, &pe)) {
2771                 kvm_vcpu_reset(vcpu, true);
2772                 if (kvm_vcpu_is_bsp(apic->vcpu))
2773                         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2774                 else
2775                         vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2776         }
2777         if (test_bit(KVM_APIC_SIPI, &pe) &&
2778             vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2779                 /* evaluate pending_events before reading the vector */
2780                 smp_rmb();
2781                 sipi_vector = apic->sipi_vector;
2782                 kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2783                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2784         }
2785 }
2786
2787 void kvm_lapic_init(void)
2788 {
2789         /* do not patch jump label more than once per second */
2790         jump_label_rate_limit(&apic_hw_disabled, HZ);
2791         jump_label_rate_limit(&apic_sw_disabled, HZ);
2792 }
2793
2794 void kvm_lapic_exit(void)
2795 {
2796         static_key_deferred_flush(&apic_hw_disabled);
2797         static_key_deferred_flush(&apic_sw_disabled);
2798 }