arch/x86/kvm/lapic.c
// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

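/*
 * On 32-bit hosts there is no native 64-bit modulo, so open-code it via
 * div64_u64(); 64-bit hosts can use the '%' operator directly.
 */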
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium (see Intel SDM section 8.4.8) */
#define APIC_VERSION                    (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH               (1 << 12)
/* The following defines are not in apicdef.h */
#define MAX_APIC_VECTOR                 256
#define APIC_VECTORS_PER_REG            32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN  100     /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX  10000   /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT     1000
#define LAPIC_TIMER_ADVANCE_NS_MAX      5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8

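/*
 * The 256 vectors are tracked in eight 32-bit registers, each aligned on a
 * 16-byte boundary in the register page: REG_POS(vec) selects the register
 * ((vec >> 5) << 4) and VEC_POS(vec) the bit within it (vec & 31); both
 * helpers come from lapic.h.
 */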
static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

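/*
 * Hardware/software disable of the APIC is tracked with deferred static
 * keys: re-enabling only schedules the branch patching after a HZ delay,
 * which avoids thrashing the kernel text when a guest toggles its APIC
 * rapidly.
 */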
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK        \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK       \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops.set_hv_timer
               && !(kvm_mwait_in_guest(vcpu->kvm) ||
                    kvm_can_post_timer_interrupt(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
        switch (map->mode) {
        case KVM_APIC_MODE_X2APIC: {
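                /*
                 * In x2APIC logical mode, dest_id[31:16] is the cluster ID
                 * and dest_id[15:0] a bitmask of up to 16 APICs within the
                 * cluster.  Cluster x contains the APIC IDs x*16..x*16+15,
                 * which are exactly phys_map[x*16..x*16+15], hence the
                 * "cluster * 16" offset below.
                 */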
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
                }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        default:
                /* Not optimized. */
                return false;
        }
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
        CLEAN,
        UPDATE_IN_PROGRESS,
        DIRTY
};

void kvm_recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */

        /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
        if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;

        mutex_lock(&kvm->arch.apic_map_lock);
        /*
         * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
         * (if clean) or the APIC registers (if dirty).
         */
        if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
                                   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
        }

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                           GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
                u32 ldr;
                u8 xapic_id;
                u32 x2apic_id;

                if (!kvm_apic_present(vcpu))
                        continue;

                xapic_id = kvm_xapic_id(apic);
                x2apic_id = kvm_x2apic_id(apic);

                /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
                if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
                                x2apic_id <= new->max_apic_id)
                        new->phys_map[x2apic_id] = apic;
                /*
                 * ... the xAPIC ID of VCPUs with APIC ID > 0xff will wrap
                 * around, so prevent them from masking VCPUs with APIC ID
                 * <= 0xff.
                 */
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;

                if (!kvm_apic_sw_enabled(apic))
                        continue;

                ldr = kvm_lapic_get_reg(apic, APIC_LDR);

                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
                } else if (ldr) {
                        ldr = GET_APIC_LOGICAL_ID(ldr);
                        if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                                new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
                        else
                                new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                }

                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                        continue;

                if (mask)
                        cluster[ffs(mask) - 1] = apic;
        }
out:
        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
         * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
         * If another update has come in, leave it DIRTY.
         */
        atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
                               UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_branch_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_branch_inc(&apic_sw_disabled.key);

                atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }

        /* Check if there are APF page ready requests pending */
        if (enabled)
                kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
        kvm_lapic_set_reg(apic, APIC_DFR, val);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

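/*
 * In x2APIC mode the LDR is read-only and derived from the APIC ID:
 * bits 31:16 hold the cluster (id >> 4) and bits 15:0 a one-hot bit for
 * the position within the cluster (1 << (id & 0xf)).
 */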
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 v = APIC_VERSION;

        if (!lapic_in_kernel(vcpu))
                return;

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register.  Some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
         * in the LAPIC without checking the IOAPIC version first, so
         * level-triggered interrupts never get EOIed in the IOAPIC.
         */
        if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
        LVT_MASK,       /* part LVTT mask, timer mode mask added at runtime */
        LVT_MASK | APIC_MODE_MASK,      /* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,      /* LVTPC */
        LINT_MASK, LINT_MASK,   /* LVT0-1 */
        LVT_MASK                /* LVTERR */
};

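/*
 * Scan the eight 32-bit vector registers from the highest down and return
 * the highest set vector, or -1 if no bit is set.
 */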
static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

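/*
 * Merge the posted-interrupt request bitmap (PIR) into the virtual IRR,
 * atomically draining each PIR word with xchg().  Returns true if bits
 * were moved and the highest newly set vector is also the highest vector
 * pending in the IRR.
 */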
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
                irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
                        prev_irr_val = irr_val;
                        irr_val |= xchg(&pir[i], 0);
                        *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                        if (prev_irr_val != irr_val) {
                                max_updated_irr =
                                        __fls(irr_val ^ prev_irr_val) + vec;
                        }
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint.  It will always be true
         * with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        vcpu = apic->vcpu;

        if (unlikely(vcpu->arch.apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                static_call(kvm_x86_hwapic_irr_update)(vcpu,
                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
        apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(vcpu->arch.apicv_active))
                static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * ISR (in service register) bit is set when injecting an interrupt.
                 * The highest vector is injected. Thus the latest bit set matches
                 * the highest bit in ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(vcpu->arch.apicv_active))
                static_call(kvm_x86_hwapic_isr_update)(vcpu,
                                                apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and the
         * value returned may be stale, but kvm_vcpu_kick() in
         * __apic_accept_irq() will cause an immediate vmexit and the value
         * will be recalculated on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                        irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
                         struct kvm_lapic_irq *irq, u32 min)
{
        int i, count = 0;
        struct kvm_vcpu *vcpu;

        if (min > map->max_apic_id)
                return 0;

        for_each_set_bit(i, ipi_bitmap,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, irq, NULL);
                }
        }

        return count;
}

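/*
 * Handler for the KVM_HC_SEND_IPI paravirt hypercall: the guest passes a
 * low and a high destination bitmap (each covering 64 APIC IDs for 64-bit
 * callers, 32 otherwise) starting at @min, plus an ICR value describing
 * the interrupt.  See Documentation/virt/kvm/hypercalls.rst.
 */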
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        struct kvm_apic_map *map;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count;

        if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
                return -KVM_EINVAL;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        count = -EOPNOTSUPP;
        if (likely(map)) {
                count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
                min += cluster_size;
                count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
        }

        rcu_read_unlock();
        return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0) {
                printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return false;
        }
        return val & KVM_PV_EOI_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
                printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
                printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (apic->vcpu->arch.apicv_active)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        if (apic_x2apic_mode(apic))
                return mda == kvm_x2apic_id(apic);

        /*
         * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as
         * if it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode
         * and this allows unique addressing of VCPUs with APIC ID over
         * 0xff.  The 0xff condition is needed because the xAPIC ID is
         * writeable.
         */
        if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
                return true;

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                return false;
        }
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int shorthand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        ASSERT(target);
        switch (shorthand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

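/*
 * Vector hashing: pick the (vector % dest_vcpus)-th set bit in @bitmap as
 * the destination, so that different vectors spread lowest-priority
 * interrupts across the candidate vCPUs deterministically.
 */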
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                       const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                printk(KERN_INFO
                       "Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                                map->mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *      1. For lowest-priority interrupts, store all the possible
 *         destination vCPUs in an array.
 *      2. Use "guest vector % max number of destination vCPUs" to find
 *         the right destination vCPU in the array for the lowest-priority
 *         interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                fallthrough;
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector,
                                                     apic->regs + APIC_TMR);
                        else
                                kvm_lapic_clear_vector(vector,
                                                       apic->regs + APIC_TMR);
                }

                if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
                        kvm_lapic_set_irr(vector, apic);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_STARTUP:
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts.  It either uses kvm_apic_map_get_dest_lapic() to find
 * the destination vcpus array and set the bitmap, or it traverses each
 * available vcpu to identify them one by one.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap)
{
        struct kvm_lapic **dest_vcpu = NULL;
        struct kvm_lapic *src = NULL;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        unsigned long bitmap;
        int i, vcpu_idx;
        bool ret;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
                                          &bitmap);
        if (ret) {
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dest_vcpu[i])
                                continue;
                        vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
                        __set_bit(vcpu_idx, vcpu_bitmap);
                }
        } else {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!kvm_apic_present(vcpu))
                                continue;
                        if (!kvm_apic_match_dest(vcpu, NULL,
                                                 irq->shorthand,
                                                 irq->dest_id,
                                                 irq->dest_mode))
                                continue;
                        __set_bit(i, vcpu_bitmap);
                }
        }
        rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Eoi the ioapic only if the ioapic doesn't own the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every write to EOI will have a corresponding ISR bit set;
         * one example is when the kernel checks the timer in setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (to_hv_vcpu(apic->vcpu) &&
            test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including the vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

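/*
 * Compute the current-count register (TMCCT) on demand: the remaining time
 * until target_expiration, folded into the current period for periodic
 * mode, converted back to timer ticks via the bus cycle length and the
 * divide configuration.
 */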
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
                apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                         (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                break;

        case APIC_TMCCT:        /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                fallthrough;
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

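/*
 * APIC registers sit on 16-byte boundaries, so (reg >> 4) yields a unique
 * bit index per register; a u64 is enough to cover the 0x000-0x3f0 range.
 */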
#define APIC_REG_MASK(reg)      (1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
        (APIC_REG_MASK(first) * ((1ull << (count)) - 1))

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        u64 valid_reg_mask =
                APIC_REG_MASK(APIC_ID) |
                APIC_REG_MASK(APIC_LVR) |
                APIC_REG_MASK(APIC_TASKPRI) |
                APIC_REG_MASK(APIC_PROCPRI) |
                APIC_REG_MASK(APIC_LDR) |
                APIC_REG_MASK(APIC_DFR) |
                APIC_REG_MASK(APIC_SPIV) |
                APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
                APIC_REG_MASK(APIC_ESR) |
                APIC_REG_MASK(APIC_ICR) |
                APIC_REG_MASK(APIC_ICR2) |
                APIC_REG_MASK(APIC_LVTT) |
                APIC_REG_MASK(APIC_LVTTHMR) |
                APIC_REG_MASK(APIC_LVTPC) |
                APIC_REG_MASK(APIC_LVT0) |
                APIC_REG_MASK(APIC_LVT1) |
                APIC_REG_MASK(APIC_LVTERR) |
                APIC_REG_MASK(APIC_TMICT) |
                APIC_REG_MASK(APIC_TMCCT) |
                APIC_REG_MASK(APIC_TDCR);

        /* ARBPRI is not valid on x2APIC */
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);

        if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
                return 1;

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return addr >= apic->base_address &&
                addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                           gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                memset(data, 0xff, len);
                return 0;
        }

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

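/*
 * The divide configuration is encoded in TDCR bits 0, 1 and 3; squeezing
 * them into a contiguous 3-bit value v yields divide_count = 2^(v + 1),
 * with the all-ones pattern wrapping to divide-by-1 via the "& 0x7".
 */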
1464 static void update_divide_count(struct kvm_lapic *apic)
1465 {
1466         u32 tmp1, tmp2, tdcr;
1467
1468         tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1469         tmp1 = tdcr & 0xf;
1470         tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1471         apic->divide_count = 0x1 << (tmp2 & 0x7);
1472 }
1473
1474 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1475 {
1476         /*
1477          * Do not allow the guest to program periodic timers with small
1478          * interval, since the hrtimers are not throttled by the host
1479          * scheduler.
1480          */
1481         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1482                 s64 min_period = min_timer_period_us * 1000LL;
1483
1484                 if (apic->lapic_timer.period < min_period) {
1485                         pr_info_ratelimited(
1486                             "kvm: vcpu %i: requested %lld ns "
1487                             "lapic timer period limited to %lld ns\n",
1488                             apic->vcpu->vcpu_id,
1489                             apic->lapic_timer.period, min_period);
1490                         apic->lapic_timer.period = min_period;
1491                 }
1492         }
1493 }
1494
1495 static void cancel_hv_timer(struct kvm_lapic *apic);
1496
1497 static void apic_update_lvtt(struct kvm_lapic *apic)
1498 {
1499         u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1500                         apic->lapic_timer.timer_mode_mask;
1501
1502         if (apic->lapic_timer.timer_mode != timer_mode) {
1503                 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1504                                 APIC_LVT_TIMER_TSCDEADLINE)) {
1505                         hrtimer_cancel(&apic->lapic_timer.timer);
1506                         preempt_disable();
1507                         if (apic->lapic_timer.hv_timer_in_use)
1508                                 cancel_hv_timer(apic);
1509                         preempt_enable();
1510                         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1511                         apic->lapic_timer.period = 0;
1512                         apic->lapic_timer.tscdeadline = 0;
1513                 }
1514                 apic->lapic_timer.timer_mode = timer_mode;
1515                 limit_periodic_timer_frequency(apic);
1516         }
1517 }
1518
1519 /*
1520  * On APICv, this test will cause a busy wait
1521  * during a higher-priority task.
1522  */
1523
1524 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1525 {
1526         struct kvm_lapic *apic = vcpu->arch.apic;
1527         u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1528
1529         if (kvm_apic_hw_enabled(apic)) {
1530                 int vec = reg & APIC_VECTOR_MASK;
1531                 void *bitmap = apic->regs + APIC_ISR;
1532
1533                 if (vcpu->arch.apicv_active)
1534                         bitmap = apic->regs + APIC_IRR;
1535
1536                 if (apic_test_vector(vec, bitmap))
1537                         return true;
1538         }
1539         return false;
1540 }
1541
1542 static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1543 {
1544         u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1545
1546         /*
1547          * If the guest TSC is running at a different ratio than the host, then
1548          * convert the delay to nanoseconds to achieve an accurate delay.  Note
1549          * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1550          * always for VMX-enabled hardware.
1551          */
1552         if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
1553                 __delay(min(guest_cycles,
1554                         nsec_to_cycles(vcpu, timer_advance_ns)));
1555         } else {
1556                 u64 delay_ns = guest_cycles * 1000000ULL;
1557                 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1558                 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1559         }
1560 }
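
/*
 * Example of the scaled path (illustrative arithmetic): with
 * virtual_tsc_khz = 2,000,000 (a 2 GHz guest TSC) and guest_cycles =
 * 4000,
 *
 *   delay_ns = 4000 * 1,000,000 / 2,000,000 = 2000 ns
 *
 * which is then capped at timer_advance_ns before the ndelay().
 */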
1561
1562 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1563                                               s64 advance_expire_delta)
1564 {
1565         struct kvm_lapic *apic = vcpu->arch.apic;
1566         u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1567         u64 ns;
1568
1569         /* Do not adjust for tiny fluctuations or large random spikes. */
1570         if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1571             abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1572                 return;
1573
1574         /* too early */
1575         if (advance_expire_delta < 0) {
1576                 ns = -advance_expire_delta * 1000000ULL;
1577                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1578                 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1579         } else {
1580                 /* too late */
1581                 ns = advance_expire_delta * 1000000ULL;
1582                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1583                 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1584         }
1585
1586         if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1587                 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1588         apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1589 }
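
/*
 * Worked example (illustrative): if the interrupt arrived 800 guest TSC
 * cycles early on a 2 GHz guest (virtual_tsc_khz = 2,000,000), then
 *
 *   ns = 800 * 1,000,000 / 2,000,000 = 400
 *   timer_advance_ns -= 400 / LAPIC_TIMER_ADVANCE_ADJUST_STEP;  /* -50 ns */
 *
 * Deltas outside the [ADJUST_MIN, ADJUST_MAX] cycle window are ignored
 * above, so one scheduling spike cannot skew the estimate.
 */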
1590
1591 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1592 {
1593         struct kvm_lapic *apic = vcpu->arch.apic;
1594         u64 guest_tsc, tsc_deadline;
1595
1596         tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1597         apic->lapic_timer.expired_tscdeadline = 0;
1598         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1599         apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1600
1601         if (guest_tsc < tsc_deadline)
1602                 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1603
1604         if (lapic_timer_advance_dynamic)
1605                 adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1606 }
1607
1608 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1609 {
1610         if (lapic_in_kernel(vcpu) &&
1611             vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1612             vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1613             lapic_timer_int_injected(vcpu))
1614                 __kvm_wait_lapic_expire(vcpu);
1615 }
1616 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1617
1618 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1619 {
1620         struct kvm_timer *ktimer = &apic->lapic_timer;
1621
1622         kvm_apic_local_deliver(apic, APIC_LVTT);
1623         if (apic_lvtt_tscdeadline(apic)) {
1624                 ktimer->tscdeadline = 0;
1625         } else if (apic_lvtt_oneshot(apic)) {
1626                 ktimer->tscdeadline = 0;
1627                 ktimer->target_expiration = 0;
1628         }
1629 }
1630
1631 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1632 {
1633         struct kvm_vcpu *vcpu = apic->vcpu;
1634         struct kvm_timer *ktimer = &apic->lapic_timer;
1635
1636         if (atomic_read(&apic->lapic_timer.pending))
1637                 return;
1638
1639         if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1640                 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1641
1642         if (!from_timer_fn && vcpu->arch.apicv_active) {
1643                 WARN_ON(kvm_get_running_vcpu() != vcpu);
1644                 kvm_apic_inject_pending_timer_irqs(apic);
1645                 return;
1646         }
1647
1648         if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1649                 /*
1650                  * Ensure the guest's timer has truly expired before posting an
1651                  * interrupt.  Open code the relevant checks to avoid querying
1652                  * lapic_timer_int_injected(), which will be false since the
1653                  * interrupt isn't yet injected.  Waiting until after injecting
1654                  * is not an option since that won't help a posted interrupt.
1655                  */
1656                 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1657                     vcpu->arch.apic->lapic_timer.timer_advance_ns)
1658                         __kvm_wait_lapic_expire(vcpu);
1659                 kvm_apic_inject_pending_timer_irqs(apic);
1660                 return;
1661         }
1662
1663         atomic_inc(&apic->lapic_timer.pending);
1664         kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1665         if (from_timer_fn)
1666                 kvm_vcpu_kick(vcpu);
1667 }
1668
1669 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1670 {
1671         struct kvm_timer *ktimer = &apic->lapic_timer;
1672         u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1673         u64 ns = 0;
1674         ktime_t expire;
1675         struct kvm_vcpu *vcpu = apic->vcpu;
1676         unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1677         unsigned long flags;
1678         ktime_t now;
1679
1680         if (unlikely(!tscdeadline || !this_tsc_khz))
1681                 return;
1682
1683         local_irq_save(flags);
1684
1685         now = ktime_get();
1686         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1687
1688         ns = (tscdeadline - guest_tsc) * 1000000ULL;
1689         do_div(ns, this_tsc_khz);
1690
1691         if (likely(tscdeadline > guest_tsc) &&
1692             likely(ns > apic->lapic_timer.timer_advance_ns)) {
1693                 expire = ktime_add_ns(now, ns);
1694                 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1695                 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1696         } else
1697                 apic_timer_expired(apic, false);
1698
1699         local_irq_restore(flags);
1700 }
1701
1702 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1703 {
1704         return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1705 }
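
/*
 * Example (illustrative, using the current APIC_BUS_CYCLE_NS value of 1):
 * TMICT = 1,000,000 with divide_count = 16 gives
 *
 *   tmict_to_ns() = 1,000,000 * 1 * 16 = 16,000,000 ns = 16 ms
 */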
1706
1707 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1708 {
1709         ktime_t now, remaining;
1710         u64 ns_remaining_old, ns_remaining_new;
1711
1712         apic->lapic_timer.period =
1713                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1714         limit_periodic_timer_frequency(apic);
1715
1716         now = ktime_get();
1717         remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1718         if (ktime_to_ns(remaining) < 0)
1719                 remaining = 0;
1720
1721         ns_remaining_old = ktime_to_ns(remaining);
1722         ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1723                                            apic->divide_count, old_divisor);
1724
1725         apic->lapic_timer.tscdeadline +=
1726                 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1727                 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1728         apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1729 }
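
/*
 * Rescale example (illustrative): suppose 8 ms of the initial count
 * remain and the guest changes TDCR so divide_count goes from 4 to 8:
 *
 *   ns_remaining_new = 8,000,000 * 8 / 4 = 16,000,000 ns
 *
 * The outstanding fraction of the count now takes twice as long, and
 * both target_expiration and tscdeadline are pushed out by the same
 * 8 ms so the hrtimer and hv-timer deadlines stay in step.
 */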
1730
1731 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1732 {
1733         ktime_t now;
1734         u64 tscl = rdtsc();
1735         s64 deadline;
1736
1737         now = ktime_get();
1738         apic->lapic_timer.period =
1739                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1740
1741         if (!apic->lapic_timer.period) {
1742                 apic->lapic_timer.tscdeadline = 0;
1743                 return false;
1744         }
1745
1746         limit_periodic_timer_frequency(apic);
1747         deadline = apic->lapic_timer.period;
1748
1749         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1750                 if (unlikely(count_reg != APIC_TMICT)) {
1751                         deadline = tmict_to_ns(apic,
1752                                      kvm_lapic_get_reg(apic, count_reg));
1753                         if (unlikely(deadline <= 0))
1754                                 deadline = apic->lapic_timer.period;
1755                         else if (unlikely(deadline > apic->lapic_timer.period)) {
1756                                 pr_info_ratelimited(
1757                                     "kvm: vcpu %i: requested lapic timer restore with "
1758                                     "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1759                                     "Using initial count to start timer.\n",
1760                                     apic->vcpu->vcpu_id,
1761                                     count_reg,
1762                                     kvm_lapic_get_reg(apic, count_reg),
1763                                     deadline, apic->lapic_timer.period);
1764                                 kvm_lapic_set_reg(apic, count_reg, 0);
1765                                 deadline = apic->lapic_timer.period;
1766                         }
1767                 }
1768         }
1769
1770         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1771                 nsec_to_cycles(apic->vcpu, deadline);
1772         apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1773
1774         return true;
1775 }
1776
1777 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1778 {
1779         ktime_t now = ktime_get();
1780         u64 tscl = rdtsc();
1781         ktime_t delta;
1782
1783         /*
1784          * Synchronize both deadlines to the same time source or
1785          * differences in the periods (caused by differences in the
1786          * underlying clocks or numerical approximation errors) will
1787          * cause the two to drift apart over time as the errors
1788          * accumulate.
1789          */
1790         apic->lapic_timer.target_expiration =
1791                 ktime_add_ns(apic->lapic_timer.target_expiration,
1792                                 apic->lapic_timer.period);
1793         delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1794         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1795                 nsec_to_cycles(apic->vcpu, delta);
1796 }
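
/*
 * Why the resync above matters (illustrative): a periodic timer ticking
 * every 1 ms for an hour fires ~3.6 million times.  If the ktime and
 * TSC deadlines were each advanced by an independently converted
 * period, even a sub-nanosecond conversion error per tick could
 * accumulate into milliseconds of drift; deriving tscdeadline from the
 * remaining ktime delta each tick keeps the error bounded.
 */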
1797
1798 static void start_sw_period(struct kvm_lapic *apic)
1799 {
1800         if (!apic->lapic_timer.period)
1801                 return;
1802
1803         if (ktime_after(ktime_get(),
1804                         apic->lapic_timer.target_expiration)) {
1805                 apic_timer_expired(apic, false);
1806
1807                 if (apic_lvtt_oneshot(apic))
1808                         return;
1809
1810                 advance_periodic_target_expiration(apic);
1811         }
1812
1813         hrtimer_start(&apic->lapic_timer.timer,
1814                 apic->lapic_timer.target_expiration,
1815                 HRTIMER_MODE_ABS_HARD);
1816 }
1817
1818 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1819 {
1820         if (!lapic_in_kernel(vcpu))
1821                 return false;
1822
1823         return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1824 }
1825 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1826
1827 static void cancel_hv_timer(struct kvm_lapic *apic)
1828 {
1829         WARN_ON(preemptible());
1830         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1831         static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1832         apic->lapic_timer.hv_timer_in_use = false;
1833 }
1834
1835 static bool start_hv_timer(struct kvm_lapic *apic)
1836 {
1837         struct kvm_timer *ktimer = &apic->lapic_timer;
1838         struct kvm_vcpu *vcpu = apic->vcpu;
1839         bool expired;
1840
1841         WARN_ON(preemptible());
1842         if (!kvm_can_use_hv_timer(vcpu))
1843                 return false;
1844
1845         if (!ktimer->tscdeadline)
1846                 return false;
1847
1848         if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1849                 return false;
1850
1851         ktimer->hv_timer_in_use = true;
1852         hrtimer_cancel(&ktimer->timer);
1853
1854         /*
1855          * To simplify handling the periodic timer, leave the hv timer running
1856          * even if the deadline timer has expired, i.e. rely on the resulting
1857          * VM-Exit to recompute the periodic timer's target expiration.
1858          */
1859         if (!apic_lvtt_period(apic)) {
1860                 /*
1861                  * Cancel the hv timer if the sw timer fired while the hv timer
1862                  * was being programmed, or if the hv timer itself expired.
1863                  */
1864                 if (atomic_read(&ktimer->pending)) {
1865                         cancel_hv_timer(apic);
1866                 } else if (expired) {
1867                         apic_timer_expired(apic, false);
1868                         cancel_hv_timer(apic);
1869                 }
1870         }
1871
1872         trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1873
1874         return true;
1875 }
1876
1877 static void start_sw_timer(struct kvm_lapic *apic)
1878 {
1879         struct kvm_timer *ktimer = &apic->lapic_timer;
1880
1881         WARN_ON(preemptible());
1882         if (apic->lapic_timer.hv_timer_in_use)
1883                 cancel_hv_timer(apic);
1884         if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1885                 return;
1886
1887         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1888                 start_sw_period(apic);
1889         else if (apic_lvtt_tscdeadline(apic))
1890                 start_sw_tscdeadline(apic);
1891         trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1892 }
1893
1894 static void restart_apic_timer(struct kvm_lapic *apic)
1895 {
1896         preempt_disable();
1897
1898         if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1899                 goto out;
1900
1901         if (!start_hv_timer(apic))
1902                 start_sw_timer(apic);
1903 out:
1904         preempt_enable();
1905 }
1906
1907 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1908 {
1909         struct kvm_lapic *apic = vcpu->arch.apic;
1910
1911         preempt_disable();
1912         /* If the preempt notifier has already run, it also called apic_timer_expired */
1913         if (!apic->lapic_timer.hv_timer_in_use)
1914                 goto out;
1915         WARN_ON(rcuwait_active(&vcpu->wait));
1916         apic_timer_expired(apic, false);
1917         cancel_hv_timer(apic);
1918
1919         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1920                 advance_periodic_target_expiration(apic);
1921                 restart_apic_timer(apic);
1922         }
1923 out:
1924         preempt_enable();
1925 }
1926 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1927
1928 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1929 {
1930         restart_apic_timer(vcpu->arch.apic);
1931 }
1932 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1933
1934 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1935 {
1936         struct kvm_lapic *apic = vcpu->arch.apic;
1937
1938         preempt_disable();
1939         /* Possibly the TSC deadline timer is not enabled yet */
1940         if (apic->lapic_timer.hv_timer_in_use)
1941                 start_sw_timer(apic);
1942         preempt_enable();
1943 }
1944 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1945
1946 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1947 {
1948         struct kvm_lapic *apic = vcpu->arch.apic;
1949
1950         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1951         restart_apic_timer(apic);
1952 }
1953
1954 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
1955 {
1956         atomic_set(&apic->lapic_timer.pending, 0);
1957
1958         if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1959             && !set_target_expiration(apic, count_reg))
1960                 return;
1961
1962         restart_apic_timer(apic);
1963 }
1964
1965 static void start_apic_timer(struct kvm_lapic *apic)
1966 {
1967         __start_apic_timer(apic, APIC_TMICT);
1968 }
1969
1970 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1971 {
1972         bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1973
1974         if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1975                 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1976                 if (lvt0_in_nmi_mode) {
1977                         atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1978                 } else
1979                         atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1980         }
1981 }
1982
1983 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1984 {
1985         int ret = 0;
1986
1987         trace_kvm_apic_write(reg, val);
1988
1989         switch (reg) {
1990         case APIC_ID:           /* Local APIC ID */
1991                 if (!apic_x2apic_mode(apic))
1992                         kvm_apic_set_xapic_id(apic, val >> 24);
1993                 else
1994                         ret = 1;
1995                 break;
1996
1997         case APIC_TASKPRI:
1998                 report_tpr_access(apic, true);
1999                 apic_set_tpr(apic, val & 0xff);
2000                 break;
2001
2002         case APIC_EOI:
2003                 apic_set_eoi(apic);
2004                 break;
2005
2006         case APIC_LDR:
2007                 if (!apic_x2apic_mode(apic))
2008                         kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2009                 else
2010                         ret = 1;
2011                 break;
2012
2013         case APIC_DFR:
2014                 if (!apic_x2apic_mode(apic))
2015                         kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2016                 else
2017                         ret = 1;
2018                 break;
2019
2020         case APIC_SPIV: {
2021                 u32 mask = 0x3ff;
2022                 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2023                         mask |= APIC_SPIV_DIRECTED_EOI;
2024                 apic_set_spiv(apic, val & mask);
2025                 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2026                         int i;
2027                         u32 lvt_val;
2028
2029                         for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
2030                                 lvt_val = kvm_lapic_get_reg(apic,
2031                                                        APIC_LVTT + 0x10 * i);
2032                                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
2033                                              lvt_val | APIC_LVT_MASKED);
2034                         }
2035                         apic_update_lvtt(apic);
2036                         atomic_set(&apic->lapic_timer.pending, 0);
2037
2038                 }
2039                 break;
2040         }
2041         case APIC_ICR:
2042                 /* No delay here, so we always clear the pending bit */
2043                 val &= ~(1 << 12);
2044                 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2045                 kvm_lapic_set_reg(apic, APIC_ICR, val);
2046                 break;
2047
2048         case APIC_ICR2:
2049                 if (!apic_x2apic_mode(apic))
2050                         val &= 0xff000000;
2051                 kvm_lapic_set_reg(apic, APIC_ICR2, val);
2052                 break;
2053
2054         case APIC_LVT0:
2055                 apic_manage_nmi_watchdog(apic, val);
2056                 fallthrough;
2057         case APIC_LVTTHMR:
2058         case APIC_LVTPC:
2059         case APIC_LVT1:
2060         case APIC_LVTERR: {
2061                 /* TODO: Check vector */
2062                 size_t size;
2063                 u32 index;
2064
2065                 if (!kvm_apic_sw_enabled(apic))
2066                         val |= APIC_LVT_MASKED;
2067                 size = ARRAY_SIZE(apic_lvt_mask);
2068                 index = array_index_nospec(
2069                                 (reg - APIC_LVTT) >> 4, size);
2070                 val &= apic_lvt_mask[index];
2071                 kvm_lapic_set_reg(apic, reg, val);
2072                 break;
2073         }
2074
2075         case APIC_LVTT:
2076                 if (!kvm_apic_sw_enabled(apic))
2077                         val |= APIC_LVT_MASKED;
2078                 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2079                 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2080                 apic_update_lvtt(apic);
2081                 break;
2082
2083         case APIC_TMICT:
2084                 if (apic_lvtt_tscdeadline(apic))
2085                         break;
2086
2087                 hrtimer_cancel(&apic->lapic_timer.timer);
2088                 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2089                 start_apic_timer(apic);
2090                 break;
2091
2092         case APIC_TDCR: {
2093                 uint32_t old_divisor = apic->divide_count;
2094
2095                 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2096                 update_divide_count(apic);
2097                 if (apic->divide_count != old_divisor &&
2098                                 apic->lapic_timer.period) {
2099                         hrtimer_cancel(&apic->lapic_timer.timer);
2100                         update_target_expiration(apic, old_divisor);
2101                         restart_apic_timer(apic);
2102                 }
2103                 break;
2104         }
2105         case APIC_ESR:
2106                 if (apic_x2apic_mode(apic) && val != 0)
2107                         ret = 1;
2108                 break;
2109
2110         case APIC_SELF_IPI:
2111                 if (apic_x2apic_mode(apic)) {
2112                         kvm_lapic_reg_write(apic, APIC_ICR,
2113                                             APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
2114                 } else
2115                         ret = 1;
2116                 break;
2117         default:
2118                 ret = 1;
2119                 break;
2120         }
2121
2122         kvm_recalculate_apic_map(apic->vcpu->kvm);
2123
2124         return ret;
2125 }
2126 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
2127
2128 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2129                             gpa_t address, int len, const void *data)
2130 {
2131         struct kvm_lapic *apic = to_lapic(this);
2132         unsigned int offset = address - apic->base_address;
2133         u32 val;
2134
2135         if (!apic_mmio_in_range(apic, address))
2136                 return -EOPNOTSUPP;
2137
2138         if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2139                 if (!kvm_check_has_quirk(vcpu->kvm,
2140                                          KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2141                         return -EOPNOTSUPP;
2142
2143                 return 0;
2144         }
2145
2146         /*
2147          * APIC registers must be aligned on a 128-bit boundary.
2148          * 32/64/128-bit registers must be accessed as 32-bit values.
2149          * See SDM 8.4.1.
2150          */
2151         if (len != 4 || (offset & 0xf))
2152                 return 0;
2153
2154         val = *(u32 *)data;
2155
2156         kvm_lapic_reg_write(apic, offset & 0xff0, val);
2157
2158         return 0;
2159 }
2160
2161 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2162 {
2163         kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2164 }
2165 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2166
2167 /* emulate APIC access in a trap manner */
2168 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2169 {
2170         u32 val = 0;
2171
2172         /* hardware has already done the conditional check and instruction decode */
2173         offset &= 0xff0;
2174
2175         kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2176
2177         /* TODO: optimize to just emulate side effect w/o one more write */
2178         kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2179 }
2180 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2181
2182 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2183 {
2184         struct kvm_lapic *apic = vcpu->arch.apic;
2185
2186         if (!vcpu->arch.apic)
2187                 return;
2188
2189         hrtimer_cancel(&apic->lapic_timer.timer);
2190
2191         if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2192                 static_branch_slow_dec_deferred(&apic_hw_disabled);
2193
2194         if (!apic->sw_enabled)
2195                 static_branch_slow_dec_deferred(&apic_sw_disabled);
2196
2197         if (apic->regs)
2198                 free_page((unsigned long)apic->regs);
2199
2200         kfree(apic);
2201 }
2202
2203 /*
2204  *----------------------------------------------------------------------
2205  * LAPIC interface
2206  *----------------------------------------------------------------------
2207  */
2208 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2209 {
2210         struct kvm_lapic *apic = vcpu->arch.apic;
2211
2212         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2213                 return 0;
2214
2215         return apic->lapic_timer.tscdeadline;
2216 }
2217
2218 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2219 {
2220         struct kvm_lapic *apic = vcpu->arch.apic;
2221
2222         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2223                 return;
2224
2225         hrtimer_cancel(&apic->lapic_timer.timer);
2226         apic->lapic_timer.tscdeadline = data;
2227         start_apic_timer(apic);
2228 }
2229
2230 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2231 {
2232         struct kvm_lapic *apic = vcpu->arch.apic;
2233
2234         apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2235                      | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2236 }
2237
2238 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2239 {
2240         u64 tpr;
2241
2242         tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2243
2244         return (tpr & 0xf0) >> 4;
2245 }
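
/*
 * CR8 <-> TPR mapping example (illustrative): CR8 holds bits 7:4 of
 * the TPR.  kvm_lapic_set_tpr(vcpu, 0x5) stores 0x50 in APIC_TASKPRI
 * (the "& 4" above preserves bit 2 of the old value), and a TPR of
 * 0x5f reads back via kvm_lapic_get_cr8() as (0x5f & 0xf0) >> 4 = 0x5.
 */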
2246
2247 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2248 {
2249         u64 old_value = vcpu->arch.apic_base;
2250         struct kvm_lapic *apic = vcpu->arch.apic;
2251
2252         if (!apic)
2253                 value |= MSR_IA32_APICBASE_BSP;
2254
2255         vcpu->arch.apic_base = value;
2256
2257         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2258                 kvm_update_cpuid_runtime(vcpu);
2259
2260         if (!apic)
2261                 return;
2262
2263         /* update jump label if enable bit changes */
2264         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2265                 if (value & MSR_IA32_APICBASE_ENABLE) {
2266                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2267                         static_branch_slow_dec_deferred(&apic_hw_disabled);
2268                         /* Check if there are APF page ready requests pending */
2269                         kvm_make_request(KVM_REQ_APF_READY, vcpu);
2270                 } else {
2271                         static_branch_inc(&apic_hw_disabled.key);
2272                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2273                 }
2274         }
2275
2276         if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2277                 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2278
2279         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2280                 static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
2281
2282         apic->base_address = apic->vcpu->arch.apic_base &
2283                              MSR_IA32_APICBASE_BASE;
2284
2285         if ((value & MSR_IA32_APICBASE_ENABLE) &&
2286              apic->base_address != APIC_DEFAULT_PHYS_BASE)
2287                 pr_warn_once("APIC base relocation is unsupported by KVM");
2288 }
2289
2290 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2291 {
2292         struct kvm_lapic *apic = vcpu->arch.apic;
2293
2294         if (vcpu->arch.apicv_active) {
2295                 /* irr_pending is always true when apicv is activated. */
2296                 apic->irr_pending = true;
2297                 apic->isr_count = 1;
2298         } else {
2299                 apic->irr_pending = (apic_search_irr(apic) != -1);
2300                 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2301         }
2302 }
2303 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2304
2305 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2306 {
2307         struct kvm_lapic *apic = vcpu->arch.apic;
2308         int i;
2309
2310         if (!apic)
2311                 return;
2312
2313         /* Stop the timer in case it's a reset to an active apic */
2314         hrtimer_cancel(&apic->lapic_timer.timer);
2315
2316         if (!init_event) {
2317                 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2318                                          MSR_IA32_APICBASE_ENABLE);
2319                 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2320         }
2321         kvm_apic_set_version(apic->vcpu);
2322
2323         for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2324                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2325         apic_update_lvtt(apic);
2326         if (kvm_vcpu_is_reset_bsp(vcpu) &&
2327             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2328                 kvm_lapic_set_reg(apic, APIC_LVT0,
2329                              SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2330         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2331
2332         kvm_apic_set_dfr(apic, 0xffffffffU);
2333         apic_set_spiv(apic, 0xff);
2334         kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2335         if (!apic_x2apic_mode(apic))
2336                 kvm_apic_set_ldr(apic, 0);
2337         kvm_lapic_set_reg(apic, APIC_ESR, 0);
2338         kvm_lapic_set_reg(apic, APIC_ICR, 0);
2339         kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2340         kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2341         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2342         for (i = 0; i < 8; i++) {
2343                 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2344                 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2345                 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2346         }
2347         kvm_apic_update_apicv(vcpu);
2348         apic->highest_isr_cache = -1;
2349         update_divide_count(apic);
2350         atomic_set(&apic->lapic_timer.pending, 0);
2351         if (kvm_vcpu_is_bsp(vcpu))
2352                 kvm_lapic_set_base(vcpu,
2353                                 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2354         vcpu->arch.pv_eoi.msr_val = 0;
2355         apic_update_ppr(apic);
2356         if (vcpu->arch.apicv_active) {
2357                 static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2358                 static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
2359                 static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
2360         }
2361
2362         vcpu->arch.apic_arb_prio = 0;
2363         vcpu->arch.apic_attention = 0;
2364
2365         kvm_recalculate_apic_map(vcpu->kvm);
2366 }
2367
2368 /*
2369  *----------------------------------------------------------------------
2370  * timer interface
2371  *----------------------------------------------------------------------
2372  */
2373
2374 static bool lapic_is_periodic(struct kvm_lapic *apic)
2375 {
2376         return apic_lvtt_period(apic);
2377 }
2378
2379 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2380 {
2381         struct kvm_lapic *apic = vcpu->arch.apic;
2382
2383         if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2384                 return atomic_read(&apic->lapic_timer.pending);
2385
2386         return 0;
2387 }
2388
2389 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2390 {
2391         u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2392         int vector, mode, trig_mode;
2393
2394         if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2395                 vector = reg & APIC_VECTOR_MASK;
2396                 mode = reg & APIC_MODE_MASK;
2397                 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2398                 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2399                                         NULL);
2400         }
2401         return 0;
2402 }
2403
2404 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2405 {
2406         struct kvm_lapic *apic = vcpu->arch.apic;
2407
2408         if (apic)
2409                 kvm_apic_local_deliver(apic, APIC_LVT0);
2410 }
2411
2412 static const struct kvm_io_device_ops apic_mmio_ops = {
2413         .read     = apic_mmio_read,
2414         .write    = apic_mmio_write,
2415 };
2416
2417 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2418 {
2419         struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2420         struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2421
2422         apic_timer_expired(apic, true);
2423
2424         if (lapic_is_periodic(apic)) {
2425                 advance_periodic_target_expiration(apic);
2426                 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2427                 return HRTIMER_RESTART;
2428         } else
2429                 return HRTIMER_NORESTART;
2430 }
2431
2432 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2433 {
2434         struct kvm_lapic *apic;
2435
2436         ASSERT(vcpu != NULL);
2437
2438         apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2439         if (!apic)
2440                 goto nomem;
2441
2442         vcpu->arch.apic = apic;
2443
2444         apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2445         if (!apic->regs) {
2446                 printk(KERN_ERR "failed to allocate APIC register page for vcpu %x\n",
2447                        vcpu->vcpu_id);
2448                 goto nomem_free_apic;
2449         }
2450         apic->vcpu = vcpu;
2451
2452         hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2453                      HRTIMER_MODE_ABS_HARD);
2454         apic->lapic_timer.timer.function = apic_timer_fn;
2455         if (timer_advance_ns == -1) {
2456                 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2457                 lapic_timer_advance_dynamic = true;
2458         } else {
2459                 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2460                 lapic_timer_advance_dynamic = false;
2461         }
2462
2463         /*
2464          * APIC is created enabled. This will prevent kvm_lapic_set_base from
2465          * thinking that APIC state has changed.
2466          */
2467         vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2468         static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2469         kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2470
2471         return 0;
2472 nomem_free_apic:
2473         kfree(apic);
2474         vcpu->arch.apic = NULL;
2475 nomem:
2476         return -ENOMEM;
2477 }
2478
2479 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2480 {
2481         struct kvm_lapic *apic = vcpu->arch.apic;
2482         u32 ppr;
2483
2484         if (!kvm_apic_present(vcpu))
2485                 return -1;
2486
2487         __apic_update_ppr(apic, &ppr);
2488         return apic_has_interrupt_for_ppr(apic, ppr);
2489 }
2490 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2491
2492 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2493 {
2494         u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2495
2496         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2497                 return 1;
2498         if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2499             GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2500                 return 1;
2501         return 0;
2502 }
2503
2504 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2505 {
2506         struct kvm_lapic *apic = vcpu->arch.apic;
2507
2508         if (atomic_read(&apic->lapic_timer.pending) > 0) {
2509                 kvm_apic_inject_pending_timer_irqs(apic);
2510                 atomic_set(&apic->lapic_timer.pending, 0);
2511         }
2512 }
2513
2514 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2515 {
2516         int vector = kvm_apic_has_interrupt(vcpu);
2517         struct kvm_lapic *apic = vcpu->arch.apic;
2518         u32 ppr;
2519
2520         if (vector == -1)
2521                 return -1;
2522
2523         /*
2524          * We get here even with APIC virtualization enabled, if doing
2525          * nested virtualization and L1 runs with the "acknowledge interrupt
2526          * on exit" mode.  Then we cannot inject the interrupt via RVI,
2527          * because the process would deliver it through the IDT.
2528          */
2529
2530         apic_clear_irr(vector, apic);
2531         if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
2532                 /*
2533                  * For auto-EOI interrupts, there might be another pending
2534                  * interrupt above PPR, so check whether to raise another
2535                  * KVM_REQ_EVENT.
2536                  */
2537                 apic_update_ppr(apic);
2538         } else {
2539                 /*
2540                  * For normal interrupts, PPR has been raised and there cannot
2541                  * be a higher-priority pending interrupt---except if there was
2542                  * a concurrent interrupt injection, but that would have
2543                  * triggered KVM_REQ_EVENT already.
2544                  */
2545                 apic_set_isr(vector, apic);
2546                 __apic_update_ppr(apic, &ppr);
2547         }
2548
2549         return vector;
2550 }
2551
2552 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2553                 struct kvm_lapic_state *s, bool set)
2554 {
2555         if (apic_x2apic_mode(vcpu->arch.apic)) {
2556                 u32 *id = (u32 *)(s->regs + APIC_ID);
2557                 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2558
2559                 if (vcpu->kvm->arch.x2apic_format) {
2560                         if (*id != vcpu->vcpu_id)
2561                                 return -EINVAL;
2562                 } else {
2563                         if (set)
2564                                 *id >>= 24;
2565                         else
2566                                 *id <<= 24;
2567                 }
2568
2569                 /* In x2APIC mode, the LDR is fixed and based on the id */
2570                 if (set)
2571                         *ldr = kvm_apic_calc_x2apic_ldr(*id);
2572         }
2573
2574         return 0;
2575 }
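
/*
 * Format example (illustrative): xAPIC keeps the APIC ID in bits 31:24
 * of APIC_ID, x2APIC uses all 32 bits.  For vcpu_id 5 without the
 * x2apic_format capability:
 *
 *   set: userspace passes 0x05000000, "*id >>= 24" yields 5
 *   get: internal id 5, "*id <<= 24" yields 0x05000000
 *
 * On set the LDR is rederived from the id, since in x2APIC mode
 * LDR = ((id >> 4) << 16) | (1 << (id & 0xf)).
 */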
2576
2577 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2578 {
2579         memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2580
2581         /*
2582          * Compute the current timer count from the remaining timer period
2583          * (if any) and store it in the returned register set.
2584          */
2585         __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2586                             __apic_read(vcpu->arch.apic, APIC_TMCCT));
2587
2588         return kvm_apic_state_fixup(vcpu, s, false);
2589 }
2590
2591 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2592 {
2593         struct kvm_lapic *apic = vcpu->arch.apic;
2594         int r;
2595
2596         kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2597         /* set SPIV separately to get count of SW disabled APICs right */
2598         apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2599
2600         r = kvm_apic_state_fixup(vcpu, s, true);
2601         if (r) {
2602                 kvm_recalculate_apic_map(vcpu->kvm);
2603                 return r;
2604         }
2605         memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2606
2607         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2608         kvm_recalculate_apic_map(vcpu->kvm);
2609         kvm_apic_set_version(vcpu);
2610
2611         apic_update_ppr(apic);
2612         hrtimer_cancel(&apic->lapic_timer.timer);
2613         apic->lapic_timer.expired_tscdeadline = 0;
2614         apic_update_lvtt(apic);
2615         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2616         update_divide_count(apic);
2617         __start_apic_timer(apic, APIC_TMCCT);
2618         kvm_apic_update_apicv(vcpu);
2619         apic->highest_isr_cache = -1;
2620         if (vcpu->arch.apicv_active) {
2621                 static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2622                 static_call(kvm_x86_hwapic_irr_update)(vcpu,
2623                                 apic_find_highest_irr(apic));
2624                 static_call(kvm_x86_hwapic_isr_update)(vcpu,
2625                                 apic_find_highest_isr(apic));
2626         }
2627         kvm_make_request(KVM_REQ_EVENT, vcpu);
2628         if (ioapic_in_kernel(vcpu->kvm))
2629                 kvm_rtc_eoi_tracking_restore_one(vcpu);
2630
2631         vcpu->arch.apic_arb_prio = 0;
2632
2633         return 0;
2634 }
2635
2636 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2637 {
2638         struct hrtimer *timer;
2639
2640         if (!lapic_in_kernel(vcpu) ||
2641                 kvm_can_post_timer_interrupt(vcpu))
2642                 return;
2643
2644         timer = &vcpu->arch.apic->lapic_timer.timer;
2645         if (hrtimer_cancel(timer))
2646                 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2647 }
2648
2649 /*
2650  * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2651  *
2652  * Detect whether the guest triggered PV EOI since the
2653  * last entry. If so, perform the EOI on the guest's behalf.
2654  * Clear PV EOI in guest memory in any case.
2655  */
2656 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2657                                         struct kvm_lapic *apic)
2658 {
2659         bool pending;
2660         int vector;
2661         /*
2662          * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2663          * and KVM_PV_EOI_ENABLED in guest memory as follows:
2664          *
2665          * KVM_APIC_PV_EOI_PENDING is unset:
2666          *      -> host disabled PV EOI.
2667          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2668          *      -> host enabled PV EOI, guest did not execute EOI yet.
2669          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2670          *      -> host enabled PV EOI, guest executed EOI.
2671          */
2672         BUG_ON(!pv_eoi_enabled(vcpu));
2673         pending = pv_eoi_get_pending(vcpu);
2674         /*
2675          * Clear pending bit in any case: it will be set again on vmentry.
2676          * While this might not be ideal from a performance point of view,
2677          * it makes sure PV EOI is only enabled when we know it's safe.
2678          */
2679         pv_eoi_clr_pending(vcpu);
2680         if (pending)
2681                 return;
2682         vector = apic_set_eoi(apic);
2683         trace_kvm_pv_eoi(apic, vector);
2684 }
2685
2686 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2687 {
2688         u32 data;
2689
2690         if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2691                 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2692
2693         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2694                 return;
2695
2696         if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2697                                   sizeof(u32)))
2698                 return;
2699
2700         apic_set_tpr(vcpu->arch.apic, data & 0xff);
2701 }
2702
2703 /*
2704  * apic_sync_pv_eoi_to_guest - called before vmentry
2705  *
2706  * Detect whether it's safe to enable PV EOI and
2707  * if yes do so.
2708  */
2709 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2710                                         struct kvm_lapic *apic)
2711 {
2712         if (!pv_eoi_enabled(vcpu) ||
2713             /* IRR set or many bits in ISR: could be nested. */
2714             apic->irr_pending ||
2715             /* Cache not set: could be safe but we don't bother. */
2716             apic->highest_isr_cache == -1 ||
2717             /* Need EOI to update ioapic. */
2718             kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2719                 /*
2720                  * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2721                  * so we need not do anything here.
2722                  */
2723                 return;
2724         }
2725
2726         pv_eoi_set_pending(apic->vcpu);
2727 }
2728
2729 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2730 {
2731         u32 data, tpr;
2732         int max_irr, max_isr;
2733         struct kvm_lapic *apic = vcpu->arch.apic;
2734
2735         apic_sync_pv_eoi_to_guest(vcpu, apic);
2736
2737         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2738                 return;
2739
2740         tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2741         max_irr = apic_find_highest_irr(apic);
2742         if (max_irr < 0)
2743                 max_irr = 0;
2744         max_isr = apic_find_highest_isr(apic);
2745         if (max_isr < 0)
2746                 max_isr = 0;
2747         data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2748
2749         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2750                                 sizeof(u32));
2751 }
2752
2753 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2754 {
2755         if (vapic_addr) {
2756                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2757                                         &vcpu->arch.apic->vapic_cache,
2758                                         vapic_addr, sizeof(u32)))
2759                         return -EINVAL;
2760                 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2761         } else {
2762                 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2763         }
2764
2765         vcpu->arch.apic->vapic_addr = vapic_addr;
2766         return 0;
2767 }
2768
2769 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2770 {
2771         struct kvm_lapic *apic = vcpu->arch.apic;
2772         u32 reg = (msr - APIC_BASE_MSR) << 4;
2773
2774         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2775                 return 1;
2776
2777         if (reg == APIC_ICR2)
2778                 return 1;
2779
2780         /* if this is an ICR write, set the destination (ICR2) before the command */
2781         if (reg == APIC_ICR)
2782                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2783         return kvm_lapic_reg_write(apic, reg, (u32)data);
2784 }
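
/*
 * Mapping example (illustrative): x2APIC MSRs start at APIC_BASE_MSR
 * (0x800) and each one shadows a 16-byte MMIO register, hence "<< 4".
 * A WRMSR to 0x830 (the ICR) gives
 *
 *   reg = (0x830 - 0x800) << 4 = 0x300 = APIC_ICR
 *
 * with the high half of the 64-bit value routed to APIC_ICR2 first so
 * the destination is in place before the command triggers the IPI.
 */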
2785
2786 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2787 {
2788         struct kvm_lapic *apic = vcpu->arch.apic;
2789         u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2790
2791         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2792                 return 1;
2793
2794         if (reg == APIC_DFR || reg == APIC_ICR2)
2795                 return 1;
2796
2797         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2798                 return 1;
2799         if (reg == APIC_ICR)
2800                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2801
2802         *data = (((u64)high) << 32) | low;
2803
2804         return 0;
2805 }
2806
2807 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2808 {
2809         struct kvm_lapic *apic = vcpu->arch.apic;
2810
2811         if (!lapic_in_kernel(vcpu))
2812                 return 1;
2813
2814         /* if this is an ICR write, set the destination (ICR2) before the command */
2815         if (reg == APIC_ICR)
2816                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2817         return kvm_lapic_reg_write(apic, reg, (u32)data);
2818 }
2819
2820 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2821 {
2822         struct kvm_lapic *apic = vcpu->arch.apic;
2823         u32 low, high = 0;
2824
2825         if (!lapic_in_kernel(vcpu))
2826                 return 1;
2827
2828         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2829                 return 1;
2830         if (reg == APIC_ICR)
2831                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2832
2833         *data = (((u64)high) << 32) | low;
2834
2835         return 0;
2836 }
2837
2838 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2839 {
2840         u64 addr = data & ~KVM_MSR_ENABLED;
2841         struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2842         unsigned long new_len;
2843
2844         if (!IS_ALIGNED(addr, 4))
2845                 return 1;
2846
2847         vcpu->arch.pv_eoi.msr_val = data;
2848         if (!pv_eoi_enabled(vcpu))
2849                 return 0;
2850
2851         if (addr == ghc->gpa && len <= ghc->len)
2852                 new_len = ghc->len;
2853         else
2854                 new_len = len;
2855
2856         return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2857 }
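
/*
 * MSR layout example (illustrative): bit 0 of MSR_KVM_PV_EOI_EN is
 * KVM_MSR_ENABLED and the upper bits carry the 4-byte-aligned GPA of
 * the guest's flag word.  data = 0x12340 | 1 enables PV EOI backed by
 * the u32 at GPA 0x12340; data = 0 disables it; an unaligned address
 * such as 0x12342 | 1 fails the IS_ALIGNED() check and returns 1 (the
 * common MSR code turns that into a fault for the guest).
 */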
2858
2859 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2860 {
2861         struct kvm_lapic *apic = vcpu->arch.apic;
2862         u8 sipi_vector;
2863         int r;
2864         unsigned long pe;
2865
2866         if (!lapic_in_kernel(vcpu))
2867                 return;
2868
2869         /*
2870          * Read pending events before calling the check_events
2871          * callback.
2872          */
2873         pe = smp_load_acquire(&apic->pending_events);
2874         if (!pe)
2875                 return;
2876
2877         if (is_guest_mode(vcpu)) {
2878                 r = kvm_check_nested_events(vcpu);
2879                 if (r < 0)
2880                         return;
2881                 /*
2882                  * If an event has happened and caused a vmexit,
2883                  * we know INITs are latched and therefore
2884                  * we will not incorrectly deliver an APIC
2885                  * event instead of a vmexit.
2886                  */
2887         }
2888
2889         /*
2890          * INITs are latched while CPU is in specific states
2891          * (SMM, VMX root mode, SVM with GIF=0).
2892          * Because a CPU cannot be in these states immediately
2893          * after it has processed an INIT signal (and thus in
2894          * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2895          * and leave the INIT pending.
2896          */
2897         if (kvm_vcpu_latch_init(vcpu)) {
2898                 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2899                 if (test_bit(KVM_APIC_SIPI, &pe))
2900                         clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2901                 return;
2902         }
2903
2904         if (test_bit(KVM_APIC_INIT, &pe)) {
2905                 clear_bit(KVM_APIC_INIT, &apic->pending_events);
2906                 kvm_vcpu_reset(vcpu, true);
2907                 if (kvm_vcpu_is_bsp(apic->vcpu))
2908                         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2909                 else
2910                         vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2911         }
2912         if (test_bit(KVM_APIC_SIPI, &pe)) {
2913                 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2914                 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2915                         /* evaluate pending_events before reading the vector */
2916                         smp_rmb();
2917                         sipi_vector = apic->sipi_vector;
2918                         kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2919                         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2920                 }
2921         }
2922 }
2923
2924 void kvm_lapic_exit(void)
2925 {
2926         static_key_deferred_flush(&apic_hw_disabled);
2927         static_key_deferred_flush(&apic_sw_disabled);
2928 }