arch/arm64/kvm/arch_timer.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 ARM Ltd.
4  * Author: Marc Zyngier <marc.zyngier@arm.com>
5  */
6
7 #include <linux/cpu.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 #include <linux/interrupt.h>
11 #include <linux/irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/uaccess.h>
14
15 #include <clocksource/arm_arch_timer.h>
16 #include <asm/arch_timer.h>
17 #include <asm/kvm_emulate.h>
18 #include <asm/kvm_hyp.h>
19
20 #include <kvm/arm_vgic.h>
21 #include <kvm/arm_arch_timer.h>
22
23 #include "trace.h"
24
25 static struct timecounter *timecounter;
26 static unsigned int host_vtimer_irq;
27 static unsigned int host_ptimer_irq;
28 static u32 host_vtimer_irq_flags;
29 static u32 host_ptimer_irq_flags;
30
31 static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
32
33 static const struct kvm_irq_level default_ptimer_irq = {
34         .irq    = 30,
35         .level  = 1,
36 };
37
38 static const struct kvm_irq_level default_vtimer_irq = {
39         .irq    = 27,
40         .level  = 1,
41 };
42
43 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx);
44 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
45                                  struct arch_timer_context *timer_ctx);
46 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
47 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
48                                 struct arch_timer_context *timer,
49                                 enum kvm_arch_timer_regs treg,
50                                 u64 val);
51 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
52                               struct arch_timer_context *timer,
53                               enum kvm_arch_timer_regs treg);
54
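/*
 * Per-context timer register accessors. Each arch_timer_context is backed
 * by the owning vcpu's shadow system registers, so these helpers simply
 * index into the vcpu sysreg file of the matching timer (CNTV_* for the
 * virtual timer, CNTP_* for the physical timer).
 */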
55 u32 timer_get_ctl(struct arch_timer_context *ctxt)
56 {
57         struct kvm_vcpu *vcpu = ctxt->vcpu;
58
59         switch (arch_timer_ctx_index(ctxt)) {
60         case TIMER_VTIMER:
61                 return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
62         case TIMER_PTIMER:
63                 return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
64         default:
65                 WARN_ON(1);
66                 return 0;
67         }
68 }
69
70 u64 timer_get_cval(struct arch_timer_context *ctxt)
71 {
72         struct kvm_vcpu *vcpu = ctxt->vcpu;
73
74         switch (arch_timer_ctx_index(ctxt)) {
75         case TIMER_VTIMER:
76                 return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
77         case TIMER_PTIMER:
78                 return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
79         default:
80                 WARN_ON(1);
81                 return 0;
82         }
83 }
84
85 static u64 timer_get_offset(struct arch_timer_context *ctxt)
86 {
87         struct kvm_vcpu *vcpu = ctxt->vcpu;
88
89         switch (arch_timer_ctx_index(ctxt)) {
90         case TIMER_VTIMER:
91                 return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
92         default:
93                 return 0;
94         }
95 }
96
97 static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
98 {
99         struct kvm_vcpu *vcpu = ctxt->vcpu;
100
101         switch (arch_timer_ctx_index(ctxt)) {
102         case TIMER_VTIMER:
103                 __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
104                 break;
105         case TIMER_PTIMER:
106                 __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
107                 break;
108         default:
109                 WARN_ON(1);
110         }
111 }
112
113 static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
114 {
115         struct kvm_vcpu *vcpu = ctxt->vcpu;
116
117         switch (arch_timer_ctx_index(ctxt)) {
118         case TIMER_VTIMER:
119                 __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
120                 break;
121         case TIMER_PTIMER:
122                 __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
123                 break;
124         default:
125                 WARN_ON(1);
126         }
127 }
128
129 static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
130 {
131         struct kvm_vcpu *vcpu = ctxt->vcpu;
132
133         switch (arch_timer_ctx_index(ctxt)) {
134         case TIMER_VTIMER:
135                 __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
136                 break;
137         default:
138                 WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
139         }
140 }
141
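/* Read the host physical counter via the arch timer's timecounter. */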
142 u64 kvm_phys_timer_read(void)
143 {
144         return timecounter->cc->read(timecounter->cc);
145 }
146
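/*
 * Decide which timers are handled in hardware and which are emulated:
 * with VHE both EL1 timers can be given directly to the guest, without
 * VHE only the virtual timer is direct and the EL1 physical timer is
 * emulated with a hrtimer.
 */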
147 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
148 {
149         if (has_vhe()) {
150                 map->direct_vtimer = vcpu_vtimer(vcpu);
151                 map->direct_ptimer = vcpu_ptimer(vcpu);
152                 map->emul_ptimer = NULL;
153         } else {
154                 map->direct_vtimer = vcpu_vtimer(vcpu);
155                 map->direct_ptimer = NULL;
156                 map->emul_ptimer = vcpu_ptimer(vcpu);
157         }
158
159         trace_kvm_get_timer_map(vcpu->vcpu_id, map);
160 }
161
162 static inline bool userspace_irqchip(struct kvm *kvm)
163 {
164         return static_branch_unlikely(&userspace_irqchip_in_use) &&
165                 unlikely(!irqchip_in_kernel(kvm));
166 }
167
168 static void soft_timer_start(struct hrtimer *hrt, u64 ns)
169 {
170         hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
171                       HRTIMER_MODE_ABS_HARD);
172 }
173
174 static void soft_timer_cancel(struct hrtimer *hrt)
175 {
176         hrtimer_cancel(hrt);
177 }
178
179 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
180 {
181         struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
182         struct arch_timer_context *ctx;
183         struct timer_map map;
184
185         /*
186          * We may see a timer interrupt after vcpu_put() has been called which
187          * sets the CPU's vcpu pointer to NULL, because even though the timer
188          * has been disabled in timer_save_state(), the hardware interrupt
189          * signal may not have been retired from the interrupt controller yet.
190          */
191         if (!vcpu)
192                 return IRQ_HANDLED;
193
194         get_timer_map(vcpu, &map);
195
196         if (irq == host_vtimer_irq)
197                 ctx = map.direct_vtimer;
198         else
199                 ctx = map.direct_ptimer;
200
201         if (kvm_timer_should_fire(ctx))
202                 kvm_timer_update_irq(vcpu, true, ctx);
203
204         if (userspace_irqchip(vcpu->kvm) &&
205             !static_branch_unlikely(&has_gic_active_state))
206                 disable_percpu_irq(host_vtimer_irq);
207
208         return IRQ_HANDLED;
209 }
210
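/*
 * Convert the distance between the current guest counter value and a
 * compare value into nanoseconds using the host timecounter scaling.
 * Returns 0 if the compare value has already been reached.
 */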
211 static u64 kvm_counter_compute_delta(struct arch_timer_context *timer_ctx,
212                                      u64 val)
213 {
214         u64 now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
215
216         if (now < val) {
217                 u64 ns;
218
219                 ns = cyclecounter_cyc2ns(timecounter->cc,
220                                          val - now,
221                                          timecounter->mask,
222                                          &timecounter->frac);
223                 return ns;
224         }
225
226         return 0;
227 }
228
229 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
230 {
231         return kvm_counter_compute_delta(timer_ctx, timer_get_cval(timer_ctx));
232 }
233
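/*
 * A timer interrupt can only be delivered when the timer is enabled and
 * its output is not masked (ENABLE set and IMASK clear in the control
 * register).
 */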
234 static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
235 {
236         WARN_ON(timer_ctx && timer_ctx->loaded);
237         return timer_ctx &&
238                 ((timer_get_ctl(timer_ctx) &
239                   (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
240 }
241
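/*
 * WFIT handling: when the vcpu is blocked in a WFIT instruction, the
 * deadline held in the instruction's source register also bounds how
 * long the vcpu may sleep.
 */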
242 static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
243 {
244         return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
245                 vcpu_get_flag(vcpu, IN_WFIT));
246 }
247
248 static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
249 {
250         struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
251         u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
252
253         return kvm_counter_compute_delta(ctx, val);
254 }
255
256 /*
257  * Returns the earliest expiration time in ns among guest timers.
258  * Note that it will return 0 if none of the timers can fire.
259  */
260 static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
261 {
262         u64 min_delta = ULLONG_MAX;
263         int i;
264
265         for (i = 0; i < NR_KVM_TIMERS; i++) {
266                 struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i];
267
268                 WARN(ctx->loaded, "timer %d loaded\n", i);
269                 if (kvm_timer_irq_can_fire(ctx))
270                         min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
271         }
272
273         if (vcpu_has_wfit_active(vcpu))
274                 min_delta = min(min_delta, wfit_delay_ns(vcpu));
275
276         /* If none of the timers can fire, return 0 */
277         if (min_delta == ULLONG_MAX)
278                 return 0;
279
280         return min_delta;
281 }
282
283 static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
284 {
285         struct arch_timer_cpu *timer;
286         struct kvm_vcpu *vcpu;
287         u64 ns;
288
289         timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
290         vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
291
292         /*
293          * Check that the timer has really expired from the guest's
294          * PoV (NTP on the host may have forced it to expire
295          * early). If we should have slept longer, restart it.
296          */
297         ns = kvm_timer_earliest_exp(vcpu);
298         if (unlikely(ns)) {
299                 hrtimer_forward_now(hrt, ns_to_ktime(ns));
300                 return HRTIMER_RESTART;
301         }
302
303         kvm_vcpu_wake_up(vcpu);
304         return HRTIMER_NORESTART;
305 }
306
307 static enum hrtimer_restart kvm_hrtimer_expire(struct hrtimer *hrt)
308 {
309         struct arch_timer_context *ctx;
310         struct kvm_vcpu *vcpu;
311         u64 ns;
312
313         ctx = container_of(hrt, struct arch_timer_context, hrtimer);
314         vcpu = ctx->vcpu;
315
316         trace_kvm_timer_hrtimer_expire(ctx);
317
318         /*
319          * Check that the timer has really expired from the guest's
320          * PoV (NTP on the host may have forced it to expire
321          * early). If not ready, schedule for a later time.
322          */
323         ns = kvm_timer_compute_delta(ctx);
324         if (unlikely(ns)) {
325                 hrtimer_forward_now(hrt, ns_to_ktime(ns));
326                 return HRTIMER_RESTART;
327         }
328
329         kvm_timer_update_irq(vcpu, true, ctx);
330         return HRTIMER_NORESTART;
331 }
332
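/*
 * Compute the timer output level. When the timer state is loaded on the
 * CPU, read the condition directly from the hardware control register;
 * otherwise derive it from the shadowed CVAL and the current guest
 * counter value.
 */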
333 static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
334 {
335         enum kvm_arch_timers index;
336         u64 cval, now;
337
338         if (!timer_ctx)
339                 return false;
340
341         index = arch_timer_ctx_index(timer_ctx);
342
343         if (timer_ctx->loaded) {
344                 u32 cnt_ctl = 0;
345
346                 switch (index) {
347                 case TIMER_VTIMER:
348                         cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
349                         break;
350                 case TIMER_PTIMER:
351                         cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
352                         break;
353                 case NR_KVM_TIMERS:
354                         /* GCC is braindead */
355                         cnt_ctl = 0;
356                         break;
357                 }
358
359                 return  (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
360                         (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
361                        !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
362         }
363
364         if (!kvm_timer_irq_can_fire(timer_ctx))
365                 return false;
366
367         cval = timer_get_cval(timer_ctx);
368         now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);
369
370         return cval <= now;
371 }
372
373 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
374 {
375         return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
376 }
377
378 /*
379  * Reflect the timer output level into the kvm_run structure
380  */
381 void kvm_timer_update_run(struct kvm_vcpu *vcpu)
382 {
383         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
384         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
385         struct kvm_sync_regs *regs = &vcpu->run->s.regs;
386
387         /* Populate the device bitmap with the timer states */
388         regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
389                                     KVM_ARM_DEV_EL1_PTIMER);
390         if (kvm_timer_should_fire(vtimer))
391                 regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
392         if (kvm_timer_should_fire(ptimer))
393                 regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
394 }
395
396 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
397                                  struct arch_timer_context *timer_ctx)
398 {
399         int ret;
400
401         timer_ctx->irq.level = new_level;
402         trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
403                                    timer_ctx->irq.level);
404
405         if (!userspace_irqchip(vcpu->kvm)) {
406                 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
407                                           timer_ctx->irq.irq,
408                                           timer_ctx->irq.level,
409                                           timer_ctx);
410                 WARN_ON(ret);
411         }
412 }
413
414 /* Only called for a fully emulated timer */
415 static void timer_emulate(struct arch_timer_context *ctx)
416 {
417         bool should_fire = kvm_timer_should_fire(ctx);
418
419         trace_kvm_timer_emulate(ctx, should_fire);
420
421         if (should_fire != ctx->irq.level) {
422                 kvm_timer_update_irq(ctx->vcpu, should_fire, ctx);
423                 return;
424         }
425
426         /*
427          * If the timer can fire now, we don't need to have a soft timer
428          * scheduled for the future.  If the timer cannot fire at all,
429          * then we also don't need a soft timer.
430          */
431         if (!kvm_timer_irq_can_fire(ctx)) {
432                 soft_timer_cancel(&ctx->hrtimer);
433                 return;
434         }
435
436         soft_timer_start(&ctx->hrtimer, kvm_timer_compute_delta(ctx));
437 }
438
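/*
 * Sync the hardware timer state back into the vcpu's shadow registers
 * and disable the hardware timer, so that it cannot fire once its state
 * is no longer loaded on this CPU.
 */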
439 static void timer_save_state(struct arch_timer_context *ctx)
440 {
441         struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
442         enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
443         unsigned long flags;
444
445         if (!timer->enabled)
446                 return;
447
448         local_irq_save(flags);
449
450         if (!ctx->loaded)
451                 goto out;
452
453         switch (index) {
454         case TIMER_VTIMER:
455                 timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
456                 timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));
457
458                 /* Disable the timer */
459                 write_sysreg_el0(0, SYS_CNTV_CTL);
460                 isb();
461
462                 break;
463         case TIMER_PTIMER:
464                 timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
465                 timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));
466
467                 /* Disable the timer */
468                 write_sysreg_el0(0, SYS_CNTP_CTL);
469                 isb();
470
471                 break;
472         case NR_KVM_TIMERS:
473                 BUG();
474         }
475
476         trace_kvm_timer_save_state(ctx);
477
478         ctx->loaded = false;
479 out:
480         local_irq_restore(flags);
481 }
482
483 /*
484  * Schedule the background timer before calling kvm_vcpu_halt, so that this
485  * thread is removed from its waitqueue and made runnable when there's a timer
486  * interrupt to handle.
487  */
488 static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
489 {
490         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
491         struct timer_map map;
492
493         get_timer_map(vcpu, &map);
494
495         /*
496          * If no timers are capable of raising interrupts (disabled or
497          * masked), then there's no more work for us to do.
498          */
499         if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
500             !kvm_timer_irq_can_fire(map.direct_ptimer) &&
501             !kvm_timer_irq_can_fire(map.emul_ptimer) &&
502             !vcpu_has_wfit_active(vcpu))
503                 return;
504
505         /*
506          * At least one guest timer will expire. Schedule a background timer
507          * set to the earliest expiration time among the guest timers.
508          */
509         soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
510 }
511
512 static void kvm_timer_unblocking(struct kvm_vcpu *vcpu)
513 {
514         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
515
516         soft_timer_cancel(&timer->bg_timer);
517 }
518
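/*
 * Load the vcpu's shadowed timer state into the hardware timer. CVAL is
 * written before CTL, with an ISB in between, so that the compare value
 * is in place before the timer is (re-)enabled.
 */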
519 static void timer_restore_state(struct arch_timer_context *ctx)
520 {
521         struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu);
522         enum kvm_arch_timers index = arch_timer_ctx_index(ctx);
523         unsigned long flags;
524
525         if (!timer->enabled)
526                 return;
527
528         local_irq_save(flags);
529
530         if (ctx->loaded)
531                 goto out;
532
533         switch (index) {
534         case TIMER_VTIMER:
535                 write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
536                 isb();
537                 write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
538                 break;
539         case TIMER_PTIMER:
540                 write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
541                 isb();
542                 write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
543                 break;
544         case NR_KVM_TIMERS:
545                 BUG();
546         }
547
548         trace_kvm_timer_restore_state(ctx);
549
550         ctx->loaded = true;
551 out:
552         local_irq_restore(flags);
553 }
554
555 static void set_cntvoff(u64 cntvoff)
556 {
557         kvm_call_hyp(__kvm_timer_set_cntvoff, cntvoff);
558 }
559
560 static inline void set_timer_irq_phys_active(struct arch_timer_context *ctx, bool active)
561 {
562         int r;
563         r = irq_set_irqchip_state(ctx->host_timer_irq, IRQCHIP_STATE_ACTIVE, active);
564         WARN_ON(r);
565 }
566
567 static void kvm_timer_vcpu_load_gic(struct arch_timer_context *ctx)
568 {
569         struct kvm_vcpu *vcpu = ctx->vcpu;
570         bool phys_active = false;
571
572         /*
573          * Update the timer output so that it is likely to match the
574          * state we're about to restore. If the timer expires between
575          * this point and the register restoration, we'll take the
576          * interrupt anyway.
577          */
578         kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx);
579
580         if (irqchip_in_kernel(vcpu->kvm))
581                 phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq);
582
583         phys_active |= ctx->irq.level;
584
585         set_timer_irq_phys_active(ctx, phys_active);
586 }
587
588 static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu)
589 {
590         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
591
592         /*
593          * Update the timer output so that it is likely to match the
594          * state we're about to restore. If the timer expires between
595          * this point and the register restoration, we'll take the
596          * interrupt anyway.
597          */
598         kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer);
599
600         /*
601          * When using a userspace irqchip with the architected timers and a
602          * host interrupt controller that doesn't support an active state, we
603          * must still prevent continuously exiting from the guest, and
604          * therefore mask the physical interrupt by disabling it on the host
605          * interrupt controller when the virtual level is high, such that the
606          * guest can make forward progress.  Once we detect the output level
607          * being de-asserted, we unmask the interrupt again so that we exit
608          * from the guest when the timer fires.
609          */
610         if (vtimer->irq.level)
611                 disable_percpu_irq(host_vtimer_irq);
612         else
613                 enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
614 }
615
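/*
 * vcpu_load hook: propagate the expected interrupt state to the host GIC
 * (or mask/unmask the host IRQ when no in-kernel GIC is present), program
 * CNTVOFF, cancel the background timer and restore the directly mapped
 * timers. Any remaining emulated ptimer is re-armed via its hrtimer.
 */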
616 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
617 {
618         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
619         struct timer_map map;
620
621         if (unlikely(!timer->enabled))
622                 return;
623
624         get_timer_map(vcpu, &map);
625
626         if (static_branch_likely(&has_gic_active_state)) {
627                 kvm_timer_vcpu_load_gic(map.direct_vtimer);
628                 if (map.direct_ptimer)
629                         kvm_timer_vcpu_load_gic(map.direct_ptimer);
630         } else {
631                 kvm_timer_vcpu_load_nogic(vcpu);
632         }
633
634         set_cntvoff(timer_get_offset(map.direct_vtimer));
635
636         kvm_timer_unblocking(vcpu);
637
638         timer_restore_state(map.direct_vtimer);
639         if (map.direct_ptimer)
640                 timer_restore_state(map.direct_ptimer);
641
642         if (map.emul_ptimer)
643                 timer_emulate(map.emul_ptimer);
644 }
645
646 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
647 {
648         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
649         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
650         struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
651         bool vlevel, plevel;
652
653         if (likely(irqchip_in_kernel(vcpu->kvm)))
654                 return false;
655
656         vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
657         plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
658
659         return kvm_timer_should_fire(vtimer) != vlevel ||
660                kvm_timer_should_fire(ptimer) != plevel;
661 }
662
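/*
 * vcpu_put hook: save the directly mapped timers back into the shadow
 * registers, cancel the emulation hrtimer, and arm the background timer
 * if the vcpu is blocking so it is woken up when a guest timer expires.
 */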
663 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
664 {
665         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
666         struct timer_map map;
667
668         if (unlikely(!timer->enabled))
669                 return;
670
671         get_timer_map(vcpu, &map);
672
673         timer_save_state(map.direct_vtimer);
674         if (map.direct_ptimer)
675                 timer_save_state(map.direct_ptimer);
676
677         /*
678          * Cancel soft timer emulation, because the only case where we
679          * need it after a vcpu_put is in the context of a sleeping VCPU, and
680          * in that case we already factor in the deadline for the physical
681          * timer when scheduling the bg_timer.
682          *
683          * In any case, we re-schedule the hrtimer for the physical timer when
684          * coming back to the VCPU thread in kvm_timer_vcpu_load().
685          */
686         if (map.emul_ptimer)
687                 soft_timer_cancel(&map.emul_ptimer->hrtimer);
688
689         if (kvm_vcpu_is_blocking(vcpu))
690                 kvm_timer_blocking(vcpu);
691
692         /*
693          * The kernel may decide to run userspace after calling vcpu_put, so
694          * we reset cntvoff to 0 to ensure a consistent read between user
695          * accesses to the virtual counter and kernel access to the physical
696          * counter in the non-VHE case. For VHE, the virtual counter uses a fixed
697          * virtual offset of zero, so there is no need to zero CNTVOFF_EL2.
698          */
699         set_cntvoff(0);
700 }
701
702 /*
703  * With a userspace irqchip we have to check if the guest de-asserted the
704  * timer and if so, unmask the timer irq signal on the host interrupt
705  * controller to ensure that we see future timer signals.
706  */
707 static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
708 {
709         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
710
711         if (!kvm_timer_should_fire(vtimer)) {
712                 kvm_timer_update_irq(vcpu, false, vtimer);
713                 if (static_branch_likely(&has_gic_active_state))
714                         set_timer_irq_phys_active(vtimer, false);
715                 else
716                         enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
717         }
718 }
719
720 void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
721 {
722         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
723
724         if (unlikely(!timer->enabled))
725                 return;
726
727         if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
728                 unmask_vtimer_irq_user(vcpu);
729 }
730
731 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
732 {
733         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
734         struct timer_map map;
735
736         get_timer_map(vcpu, &map);
737
738         /*
739          * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
740          * and to 0 for ARMv7.  We provide an implementation that always
741          * resets the timer to be disabled and unmasked and is compliant with
742          * the ARMv7 architecture.
743          */
744         timer_set_ctl(vcpu_vtimer(vcpu), 0);
745         timer_set_ctl(vcpu_ptimer(vcpu), 0);
746
747         if (timer->enabled) {
748                 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
749                 kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu));
750
751                 if (irqchip_in_kernel(vcpu->kvm)) {
752                         kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
753                         if (map.direct_ptimer)
754                                 kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
755                 }
756         }
757
758         if (map.emul_ptimer)
759                 soft_timer_cancel(&map.emul_ptimer->hrtimer);
760
761         return 0;
762 }
763
764 /* Make the updates of cntvoff for all vtimer contexts atomic */
765 static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
766 {
767         unsigned long i;
768         struct kvm *kvm = vcpu->kvm;
769         struct kvm_vcpu *tmp;
770
771         mutex_lock(&kvm->lock);
772         kvm_for_each_vcpu(i, tmp, kvm)
773                 timer_set_offset(vcpu_vtimer(tmp), cntvoff);
774
775         /*
776          * When called from the vcpu create path, the CPU being created is not
777          * included in the loop above, so we just set it here as well.
778          */
779         timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
780         mutex_unlock(&kvm->lock);
781 }
782
783 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
784 {
785         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
786         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
787         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
788
789         vtimer->vcpu = vcpu;
790         ptimer->vcpu = vcpu;
791
792         /* Synchronize cntvoff across all vtimers of a VM. */
793         update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
794         timer_set_offset(ptimer, 0);
795
796         hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
797         timer->bg_timer.function = kvm_bg_timer_expire;
798
799         hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
800         hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
801         vtimer->hrtimer.function = kvm_hrtimer_expire;
802         ptimer->hrtimer.function = kvm_hrtimer_expire;
803
804         vtimer->irq.irq = default_vtimer_irq.irq;
805         ptimer->irq.irq = default_ptimer_irq.irq;
806
807         vtimer->host_timer_irq = host_vtimer_irq;
808         ptimer->host_timer_irq = host_ptimer_irq;
809
810         vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
811         ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
812 }
813
814 void kvm_timer_cpu_up(void)
815 {
816         enable_percpu_irq(host_vtimer_irq, host_vtimer_irq_flags);
817         if (host_ptimer_irq)
818                 enable_percpu_irq(host_ptimer_irq, host_ptimer_irq_flags);
819 }
820
821 void kvm_timer_cpu_down(void)
822 {
823         disable_percpu_irq(host_vtimer_irq);
824         if (host_ptimer_irq)
825                 disable_percpu_irq(host_ptimer_irq);
826 }
827
828 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
829 {
830         struct arch_timer_context *timer;
831
832         switch (regid) {
833         case KVM_REG_ARM_TIMER_CTL:
834                 timer = vcpu_vtimer(vcpu);
835                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
836                 break;
837         case KVM_REG_ARM_TIMER_CNT:
838                 timer = vcpu_vtimer(vcpu);
839                 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
840                 break;
841         case KVM_REG_ARM_TIMER_CVAL:
842                 timer = vcpu_vtimer(vcpu);
843                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
844                 break;
845         case KVM_REG_ARM_PTIMER_CTL:
846                 timer = vcpu_ptimer(vcpu);
847                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
848                 break;
849         case KVM_REG_ARM_PTIMER_CVAL:
850                 timer = vcpu_ptimer(vcpu);
851                 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
852                 break;
853
854         default:
855                 return -1;
856         }
857
858         return 0;
859 }
860
861 static u64 read_timer_ctl(struct arch_timer_context *timer)
862 {
863         /*
864          * Set the ISTATUS bit if the timer has expired.
865          * Note that according to the ARMv8 ARM (Issue A.k), the ISTATUS bit is
866          * UNKNOWN when the ENABLE bit is 0, so we choose to set ISTATUS
867          * regardless of ENABLE for implementation convenience.
868          */
869         u32 ctl = timer_get_ctl(timer);
870
871         if (!kvm_timer_compute_delta(timer))
872                 ctl |= ARCH_TIMER_CTRL_IT_STAT;
873
874         return ctl;
875 }
876
877 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
878 {
879         switch (regid) {
880         case KVM_REG_ARM_TIMER_CTL:
881                 return kvm_arm_timer_read(vcpu,
882                                           vcpu_vtimer(vcpu), TIMER_REG_CTL);
883         case KVM_REG_ARM_TIMER_CNT:
884                 return kvm_arm_timer_read(vcpu,
885                                           vcpu_vtimer(vcpu), TIMER_REG_CNT);
886         case KVM_REG_ARM_TIMER_CVAL:
887                 return kvm_arm_timer_read(vcpu,
888                                           vcpu_vtimer(vcpu), TIMER_REG_CVAL);
889         case KVM_REG_ARM_PTIMER_CTL:
890                 return kvm_arm_timer_read(vcpu,
891                                           vcpu_ptimer(vcpu), TIMER_REG_CTL);
892         case KVM_REG_ARM_PTIMER_CNT:
893                 return kvm_arm_timer_read(vcpu,
894                                           vcpu_ptimer(vcpu), TIMER_REG_CNT);
895         case KVM_REG_ARM_PTIMER_CVAL:
896                 return kvm_arm_timer_read(vcpu,
897                                           vcpu_ptimer(vcpu), TIMER_REG_CVAL);
898         }
899         return (u64)-1;
900 }
901
902 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
903                               struct arch_timer_context *timer,
904                               enum kvm_arch_timer_regs treg)
905 {
906         u64 val;
907
908         switch (treg) {
909         case TIMER_REG_TVAL:
910                 val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
911                 val = lower_32_bits(val);
912                 break;
913
914         case TIMER_REG_CTL:
915                 val = read_timer_ctl(timer);
916                 break;
917
918         case TIMER_REG_CVAL:
919                 val = timer_get_cval(timer);
920                 break;
921
922         case TIMER_REG_CNT:
923                 val = kvm_phys_timer_read() - timer_get_offset(timer);
924                 break;
925
926         default:
927                 BUG();
928         }
929
930         return val;
931 }
932
933 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
934                               enum kvm_arch_timers tmr,
935                               enum kvm_arch_timer_regs treg)
936 {
937         u64 val;
938
939         preempt_disable();
940         kvm_timer_vcpu_put(vcpu);
941
942         val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);
943
944         kvm_timer_vcpu_load(vcpu);
945         preempt_enable();
946
947         return val;
948 }
949
950 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
951                                 struct arch_timer_context *timer,
952                                 enum kvm_arch_timer_regs treg,
953                                 u64 val)
954 {
955         switch (treg) {
956         case TIMER_REG_TVAL:
957                 timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
958                 break;
959
960         case TIMER_REG_CTL:
961                 timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
962                 break;
963
964         case TIMER_REG_CVAL:
965                 timer_set_cval(timer, val);
966                 break;
967
968         default:
969                 BUG();
970         }
971 }
972
973 void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
974                                 enum kvm_arch_timers tmr,
975                                 enum kvm_arch_timer_regs treg,
976                                 u64 val)
977 {
978         preempt_disable();
979         kvm_timer_vcpu_put(vcpu);
980
981         kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val);
982
983         kvm_timer_vcpu_load(vcpu);
984         preempt_enable();
985 }
986
987 static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
988 {
989         if (vcpu)
990                 irqd_set_forwarded_to_vcpu(d);
991         else
992                 irqd_clr_forwarded_to_vcpu(d);
993
994         return 0;
995 }
996
997 static int timer_irq_set_irqchip_state(struct irq_data *d,
998                                        enum irqchip_irq_state which, bool val)
999 {
1000         if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
1001                 return irq_chip_set_parent_state(d, which, val);
1002
1003         if (val)
1004                 irq_chip_mask_parent(d);
1005         else
1006                 irq_chip_unmask_parent(d);
1007
1008         return 0;
1009 }
1010
1011 static void timer_irq_eoi(struct irq_data *d)
1012 {
1013         if (!irqd_is_forwarded_to_vcpu(d))
1014                 irq_chip_eoi_parent(d);
1015 }
1016
1017 static void timer_irq_ack(struct irq_data *d)
1018 {
1019         d = d->parent_data;
1020         if (d->chip->irq_ack)
1021                 d->chip->irq_ack(d);
1022 }
1023
1024 static struct irq_chip timer_chip = {
1025         .name                   = "KVM",
1026         .irq_ack                = timer_irq_ack,
1027         .irq_mask               = irq_chip_mask_parent,
1028         .irq_unmask             = irq_chip_unmask_parent,
1029         .irq_eoi                = timer_irq_eoi,
1030         .irq_set_type           = irq_chip_set_type_parent,
1031         .irq_set_vcpu_affinity  = timer_irq_set_vcpu_affinity,
1032         .irq_set_irqchip_state  = timer_irq_set_irqchip_state,
1033 };
1034
1035 static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1036                                   unsigned int nr_irqs, void *arg)
1037 {
1038         irq_hw_number_t hwirq = (uintptr_t)arg;
1039
1040         return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
1041                                              &timer_chip, NULL);
1042 }
1043
1044 static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1045                                   unsigned int nr_irqs)
1046 {
1047 }
1048
1049 static const struct irq_domain_ops timer_domain_ops = {
1050         .alloc  = timer_irq_domain_alloc,
1051         .free   = timer_irq_domain_free,
1052 };
1053
1054 static struct irq_ops arch_timer_irq_ops = {
1055         .get_input_level = kvm_arch_timer_get_input_level,
1056 };
1057
1058 static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
1059 {
1060         *flags = irq_get_trigger_type(virq);
1061         if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
1062                 kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
1063                         virq);
1064                 *flags = IRQF_TRIGGER_LOW;
1065         }
1066 }
1067
1068 static int kvm_irq_init(struct arch_timer_kvm_info *info)
1069 {
1070         struct irq_domain *domain = NULL;
1071
1072         if (info->virtual_irq <= 0) {
1073                 kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
1074                         info->virtual_irq);
1075                 return -ENODEV;
1076         }
1077
1078         host_vtimer_irq = info->virtual_irq;
1079         kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);
1080
1081         if (kvm_vgic_global_state.no_hw_deactivation) {
1082                 struct fwnode_handle *fwnode;
1083                 struct irq_data *data;
1084
1085                 fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
1086                 if (!fwnode)
1087                         return -ENOMEM;
1088
1089                 /* Assume the vtimer and ptimer IRQs share the same parent domain */
1090                 data = irq_get_irq_data(host_vtimer_irq);
1091                 domain = irq_domain_create_hierarchy(data->domain, 0,
1092                                                      NR_KVM_TIMERS, fwnode,
1093                                                      &timer_domain_ops, NULL);
1094                 if (!domain) {
1095                         irq_domain_free_fwnode(fwnode);
1096                         return -ENOMEM;
1097                 }
1098
1099                 arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
1100                 WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
1101                                             (void *)TIMER_VTIMER));
1102         }
1103
1104         if (info->physical_irq > 0) {
1105                 host_ptimer_irq = info->physical_irq;
1106                 kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);
1107
1108                 if (domain)
1109                         WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
1110                                                     (void *)TIMER_PTIMER));
1111         }
1112
1113         return 0;
1114 }
1115
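/*
 * System-wide initialization: pick up the timecounter and host PPIs from
 * the arch timer driver, request the per-cpu vtimer (and, where present,
 * ptimer) interrupts, and route them to the running vcpu when a GIC with
 * an active state is available.
 */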
1116 int kvm_timer_hyp_init(bool has_gic)
1117 {
1118         struct arch_timer_kvm_info *info;
1119         int err;
1120
1121         info = arch_timer_get_kvm_info();
1122         timecounter = &info->timecounter;
1123
1124         if (!timecounter->cc) {
1125                 kvm_err("kvm_arch_timer: uninitialized timecounter\n");
1126                 return -ENODEV;
1127         }
1128
1129         err = kvm_irq_init(info);
1130         if (err)
1131                 return err;
1132
1133         /* First, do the virtual EL1 timer irq */
1134
1135         err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
1136                                  "kvm guest vtimer", kvm_get_running_vcpus());
1137         if (err) {
1138                 kvm_err("kvm_arch_timer: can't request vtimer interrupt %d (%d)\n",
1139                         host_vtimer_irq, err);
1140                 return err;
1141         }
1142
1143         if (has_gic) {
1144                 err = irq_set_vcpu_affinity(host_vtimer_irq,
1145                                             kvm_get_running_vcpus());
1146                 if (err) {
1147                         kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
1148                         goto out_free_irq;
1149                 }
1150
1151                 static_branch_enable(&has_gic_active_state);
1152         }
1153
1154         kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
1155
1156         /* Now let's do the physical EL1 timer irq */
1157
1158         if (info->physical_irq > 0) {
1159                 err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
1160                                          "kvm guest ptimer", kvm_get_running_vcpus());
1161                 if (err) {
1162                         kvm_err("kvm_arch_timer: can't request ptimer interrupt %d (%d)\n",
1163                                 host_ptimer_irq, err);
1164                         return err;
1165                 }
1166
1167                 if (has_gic) {
1168                         err = irq_set_vcpu_affinity(host_ptimer_irq,
1169                                                     kvm_get_running_vcpus());
1170                         if (err) {
1171                                 kvm_err("kvm_arch_timer: error setting vcpu affinity\n");
1172                                 goto out_free_irq;
1173                         }
1174                 }
1175
1176                 kvm_debug("physical timer IRQ%d\n", host_ptimer_irq);
1177         } else if (has_vhe()) {
1178                 kvm_err("kvm_arch_timer: invalid physical timer IRQ: %d\n",
1179                         info->physical_irq);
1180                 err = -ENODEV;
1181                 goto out_free_irq;
1182         }
1183
1184         return 0;
1185 out_free_irq:
1186         free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
1187         return err;
1188 }
1189
1190 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
1191 {
1192         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1193
1194         soft_timer_cancel(&timer->bg_timer);
1195 }
1196
1197 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
1198 {
1199         int vtimer_irq, ptimer_irq, ret;
1200         unsigned long i;
1201
1202         vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
1203         ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
1204         if (ret)
1205                 return false;
1206
1207         ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
1208         ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
1209         if (ret)
1210                 return false;
1211
1212         kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
1213                 if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
1214                     vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
1215                         return false;
1216         }
1217
1218         return true;
1219 }
1220
1221 bool kvm_arch_timer_get_input_level(int vintid)
1222 {
1223         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
1224         struct arch_timer_context *timer;
1225
1226         if (WARN(!vcpu, "No vcpu context!\n"))
1227                 return false;
1228
1229         if (vintid == vcpu_vtimer(vcpu)->irq.irq)
1230                 timer = vcpu_vtimer(vcpu);
1231         else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
1232                 timer = vcpu_ptimer(vcpu);
1233         else
1234                 BUG();
1235
1236         return kvm_timer_should_fire(timer);
1237 }
1238
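/*
 * Finalize per-vcpu timer setup once the VGIC (if any) is initialized:
 * validate the configured timer PPIs and map the direct timers' host
 * interrupts to their guest interrupts so that the GIC active state can
 * be forwarded.
 */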
1239 int kvm_timer_enable(struct kvm_vcpu *vcpu)
1240 {
1241         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
1242         struct timer_map map;
1243         int ret;
1244
1245         if (timer->enabled)
1246                 return 0;
1247
1248         /* Without a VGIC we do not map virtual IRQs to physical IRQs */
1249         if (!irqchip_in_kernel(vcpu->kvm))
1250                 goto no_vgic;
1251
1252         /*
1253          * At this stage, we have the guarantee that the vgic is both
1254          * available and initialized.
1255          */
1256         if (!timer_irqs_are_valid(vcpu)) {
1257                 kvm_debug("incorrectly configured timer irqs\n");
1258                 return -EINVAL;
1259         }
1260
1261         get_timer_map(vcpu, &map);
1262
1263         ret = kvm_vgic_map_phys_irq(vcpu,
1264                                     map.direct_vtimer->host_timer_irq,
1265                                     map.direct_vtimer->irq.irq,
1266                                     &arch_timer_irq_ops);
1267         if (ret)
1268                 return ret;
1269
1270         if (map.direct_ptimer) {
1271                 ret = kvm_vgic_map_phys_irq(vcpu,
1272                                             map.direct_ptimer->host_timer_irq,
1273                                             map.direct_ptimer->irq.irq,
1274                                             &arch_timer_irq_ops);
1275         }
1276
1277         if (ret)
1278                 return ret;
1279
1280 no_vgic:
1281         timer->enabled = 1;
1282         return 0;
1283 }
1284
1285 /*
1286  * On a VHE system, we only need to configure the EL2 timer trap register
1287  * once, not on every world switch.
1288  * The host kernel runs at EL2 with HCR_EL2.TGE == 1, so these bits have no
1289  * effect on host kernel execution.
1290  */
1291 void kvm_timer_init_vhe(void)
1292 {
1293         /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
1294         u32 cnthctl_shift = 10;
1295         u64 val;
1296
1297         /*
1298          * VHE systems allow the guest direct access to the EL1 physical
1299          * timer/counter.
1300          */
1301         val = read_sysreg(cnthctl_el2);
1302         val |= (CNTHCTL_EL1PCEN << cnthctl_shift);
1303         val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
1304         write_sysreg(val, cnthctl_el2);
1305 }
1306
1307 static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
1308 {
1309         struct kvm_vcpu *vcpu;
1310         unsigned long i;
1311
1312         kvm_for_each_vcpu(i, vcpu, kvm) {
1313                 vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
1314                 vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
1315         }
1316 }
1317
1318 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1319 {
1320         int __user *uaddr = (int __user *)(long)attr->addr;
1321         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
1322         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
1323         int irq;
1324
1325         if (!irqchip_in_kernel(vcpu->kvm))
1326                 return -EINVAL;
1327
1328         if (get_user(irq, uaddr))
1329                 return -EFAULT;
1330
1331         if (!(irq_is_ppi(irq)))
1332                 return -EINVAL;
1333
1334         if (vcpu->arch.timer_cpu.enabled)
1335                 return -EBUSY;
1336
1337         switch (attr->attr) {
1338         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1339                 set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
1340                 break;
1341         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1342                 set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
1343                 break;
1344         default:
1345                 return -ENXIO;
1346         }
1347
1348         return 0;
1349 }
1350
1351 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1352 {
1353         int __user *uaddr = (int __user *)(long)attr->addr;
1354         struct arch_timer_context *timer;
1355         int irq;
1356
1357         switch (attr->attr) {
1358         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1359                 timer = vcpu_vtimer(vcpu);
1360                 break;
1361         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1362                 timer = vcpu_ptimer(vcpu);
1363                 break;
1364         default:
1365                 return -ENXIO;
1366         }
1367
1368         irq = timer->irq.irq;
1369         return put_user(irq, uaddr);
1370 }
1371
1372 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
1373 {
1374         switch (attr->attr) {
1375         case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
1376         case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
1377                 return 0;
1378         }
1379
1380         return -ENXIO;
1381 }