arch/arm64/kvm/hyp/vgic-v3-sr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

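/*
 * Field extraction helpers for ICH_VTR_EL2: ListRegs (bits [4:0]) gives the
 * index of the last implemented List Register, PREbits (bits [28:26]) encodes
 * the number of preemption bits minus one, and the number of active priority
 * registers per group (1, 2 or 4) follows from the number of preemption bits.
 */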
#define vtr_to_max_lr_idx(v)            ((v) & 0xf)
#define vtr_to_nr_pre_bits(v)           ((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)           (1 << (vtr_to_nr_pre_bits(v) - 5))

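/*
 * The List Registers are individual system registers (ICH_LR<n>_EL2) and
 * cannot be indexed dynamically, hence the switch-based accessors below.
 */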
static u64 __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}

static void __vgic_v3_write_ap0rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP0R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP0R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP0R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP0R3_EL2);
                break;
        }
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP1R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP1R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP1R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP1R3_EL2);
                break;
        }
}

static u32 __vgic_v3_read_ap0rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP0R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP0R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP0R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP0R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP1R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP1R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP1R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP1R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
        u64 used_lrs = cpu_if->used_lrs;

        /*
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface when reading the
         * LRs, and when reading back the VMCR on non-VHE systems.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        dsb(sy);
                        isb();
                }
        }

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                int i;
                u32 elrsr;

                elrsr = read_gicreg(ICH_ELRSR_EL2);

                write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

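                /*
                 * An LR flagged as empty in ICH_ELRSR_EL2 no longer holds a
                 * valid interrupt: drop its pending/active state instead of
                 * reading the register back.
                 */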
                for (i = 0; i < used_lrs; i++) {
                        if (elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }
        }
}

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
        u64 used_lrs = cpu_if->used_lrs;
        int i;

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

                for (i = 0; i < used_lrs; i++)
                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
        }

        /*
         * Ensure that writes to the LRs, and on non-VHE systems the write to
         * the VMCR in __vgic_v3_activate_traps(), have reached the
         * (re)distributors. This ensures the guest will read the correct
         * values from the memory-mapped interface.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        isb();
                        dsb(sy);
                }
        }
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC, and VMCR_EL2 in
         * particular.  This logic must be called before
         * __vgic_v3_restore_state().
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
                write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

                if (has_vhe()) {
                        /*
                         * Ensure that the write to the VMCR will have reached
                         * the (re)distributors. This ensures the guest will
                         * read the correct values from the memory-mapped
                         * interface.
                         */
                        isb();
                        dsb(sy);
                }
        }

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);

        /*
         * If we need to trap system registers, we must write
         * ICH_HCR_EL2 anyway, even if no interrupts are being
         * injected.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;

        if (!cpu_if->vgic_sre) {
                cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }

        /*
         * If we were trapping system registers, we enabled the VGIC even if
         * no interrupts were being injected, and we disable it again here.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(0, ICH_HCR_EL2);
}

void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;
        u32 nr_pre_bits;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
                cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
                fallthrough;
        case 6:
                cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
                fallthrough;
        default:
                cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
        }

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
                cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
                fallthrough;
        case 6:
                cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
                fallthrough;
        default:
                cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
        }
}

void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;
        u32 nr_pre_bits;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
                fallthrough;
        case 6:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
                fallthrough;
        default:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
        }

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
                fallthrough;
        case 6:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
                fallthrough;
        default:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
        }
}

void __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

u64 __vgic_v3_get_ich_vtr_el2(void)
{
        return read_gicreg(ICH_VTR_EL2);
}

u64 __vgic_v3_read_vmcr(void)
{
        return read_gicreg(ICH_VMCR_EL2);
}

void __vgic_v3_write_vmcr(u32 vmcr)
{
        write_gicreg(vmcr, ICH_VMCR_EL2);
}

static int __vgic_v3_bpr_min(void)
{
        /* See Pseudocode for VPriorityGroup */
        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

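/*
 * The Group-0 encodings of the banked CPU interface registers (ICC_IAR0_EL1,
 * ICC_EOIR0_EL1, ICC_BPR0_EL1, ICC_HPPIR0_EL1 and ICC_AP0Rn_EL1) all have
 * CRm == 8; anything else trapped here is a Group-1 access.
 */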
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_esr(vcpu);
        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

        return crm != 8;
}

#define GICv3_IDLE_PRIORITY     0xff

static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
                                         u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
        u8 priority = GICv3_IDLE_PRIORITY;
        int i, lr = -1;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);
                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

                /* Not pending in the state? */
                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
                        continue;

                /* Group-0 interrupt, but Group-0 disabled? */
                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
                        continue;

                /* Group-1 interrupt, but Group-1 disabled? */
                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
                        continue;

                /* Not the highest priority? */
                if (lr_prio >= priority)
                        continue;

                /* This is a candidate */
                priority = lr_prio;
                *lr_val = val;
                lr = i;
        }

        if (lr == -1)
                *lr_val = ICC_IAR1_EL1_SPURIOUS;

        return lr;
}

static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
                                    u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
        int i;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);

                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
                    (val & ICH_LR_ACTIVE_BIT)) {
                        *lr_val = val;
                        return i;
                }
        }

        *lr_val = ICC_IAR1_EL1_SPURIOUS;
        return -1;
}

static int __vgic_v3_get_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 val;

                /*
                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
                 * contain the active priority levels for this VCPU
                 * for the maximum number of supported priority
                 * levels, and we return the full priority level only
                 * if the BPR is programmed to its minimum, otherwise
                 * we return a combination of the priority level and
                 * subpriority, as determined by the setting of the
                 * BPR, but without the full subpriority.
                 */
                val  = __vgic_v3_read_ap0rn(i);
                val |= __vgic_v3_read_ap1rn(i);
                if (!val) {
                        hap += 32;
                        continue;
                }

                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
        unsigned int bpr;

        if (vmcr & ICH_VMCR_CBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        }

        return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

557
558         if (!grp)
559                 bpr = __vgic_v3_get_bpr0(vmcr) + 1;
560         else
561                 bpr = __vgic_v3_get_bpr1(vmcr);
562
563         return pri & (GENMASK(7, 0) << bpr);
564 }
565
566 /*
567  * The priority value is independent of any of the BPR values, so we
568  * normalize it using the minimal BPR value. This guarantees that no
569  * matter what the guest does with its BPR, we can always set/get the
570  * same value of a priority.
571  */
572 static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
573 {
574         u8 pre, ap;
575         u32 val;
576         int apr;
577
578         pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
579         ap = pre >> __vgic_v3_bpr_min();
580         apr = ap / 32;
581
582         if (!grp) {
583                 val = __vgic_v3_read_ap0rn(apr);
584                 __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
585         } else {
586                 val = __vgic_v3_read_ap1rn(apr);
587                 __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
588         }
589 }
590
591 static int __vgic_v3_clear_highest_active_priority(void)
592 {
593         u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
594         u32 hap = 0;
595         int i;
596
597         for (i = 0; i < nr_apr_regs; i++) {
598                 u32 ap0, ap1;
599                 int c0, c1;
600
601                 ap0 = __vgic_v3_read_ap0rn(i);
602                 ap1 = __vgic_v3_read_ap1rn(i);
603                 if (!ap0 && !ap1) {
604                         hap += 32;
605                         continue;
606                 }
607
608                 c0 = ap0 ? __ffs(ap0) : 32;
609                 c1 = ap1 ? __ffs(ap1) : 32;
610
611                 /* Always clear the LSB, which is the highest priority */
612                 if (c0 < c1) {
613                         ap0 &= ~BIT(c0);
614                         __vgic_v3_write_ap0rn(ap0, i);
615                         hap += c0;
616                 } else {
617                         ap1 &= ~BIT(c1);
618                         __vgic_v3_write_ap1rn(ap1, i);
619                         hap += c1;
620                 }
621
622                 /* Rescale to 8 bits of priority */
623                 return hap << __vgic_v3_bpr_min();
624         }
625
626         return GICv3_IDLE_PRIORITY;
627 }
628
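/*
 * Emulate an ICC_IAR{0,1}_EL1 read: find the highest priority pending LR
 * for the group being acknowledged, check it against the PMR and the active
 * priorities, then mark it active and return its INTID (or a spurious ID).
 */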
static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        lr_val &= ~ICH_LR_STATE;
        /* No active state for LPIs */
        if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
                lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
        lr_val &= ~ICH_LR_ACTIVE_BIT;
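        /*
         * An LR with the HW bit set is linked to a physical interrupt,
         * which must be deactivated as well, via ICC_DIR_EL1.
         */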
        if (lr_val & ICH_LR_HW) {
                u32 pid;

                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
                gic_write_dir(pid);
        }

        __gic_v3_set_lr(lr_val, lr);
}

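/*
 * ICH_HCR_EL2.EOIcount counts EOIs that did not match any List Register;
 * together with ICH_HCR_EL2.LRENPIE it can raise a maintenance interrupt
 * if the hypervisor asked for one.
 */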
static void __vgic_v3_bump_eoicount(void)
{
        u32 hcr;

        hcr = read_gicreg(ICH_HCR_EL2);
        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
        write_gicreg(hcr, ICH_HCR_EL2);
}

static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        int lr;

        /* EOImode == 0, nothing to be done here */
        if (!(vmcr & ICH_VMCR_EOIM_MASK))
                return;

        /* No deactivate to be performed on an LPI */
        if (vid >= VGIC_MIN_LPI)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        u8 lr_prio, act_prio;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        /* Drop priority in any case */
        act_prio = __vgic_v3_clear_highest_active_priority();

        /* If EOIing an LPI, no deactivate to be performed */
        if (vid >= VGIC_MIN_LPI)
                return;

        /* EOImode == 1, nothing to be done here */
        if (vmcr & ICH_VMCR_EOIM_MASK)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

        /* If priorities or group do not match, the guest has fscked-up. */
        if (grp != !!(lr_val & ICH_LR_GROUP) ||
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
                return;

        /* Let's now perform the deactivation */
        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG0_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG0_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG1_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG1_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min() - 1;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR0_SHIFT;
        val &= ICH_VMCR_BPR0_MASK;
        vmcr &= ~ICH_VMCR_BPR0_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();

        if (vmcr & ICH_VMCR_CBPR_MASK)
                return;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR1_SHIFT;
        val &= ICH_VMCR_BPR1_MASK;
        vmcr &= ~ICH_VMCR_BPR1_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val;

        if (!__vgic_v3_get_group(vcpu))
                val = __vgic_v3_read_ap0rn(n);
        else
                val = __vgic_v3_read_ap1rn(n);

        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (!__vgic_v3_get_group(vcpu))
                __vgic_v3_write_ap0rn(val, n);
        else
                __vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
                                            u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        int lr, lr_grp, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr == -1)
                goto spurious;

        lr_grp = !!(lr_val & ICH_LR_GROUP);
        if (lr_grp != grp)
                lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vmcr &= ICH_VMCR_PMR_MASK;
        vmcr >>= ICH_VMCR_PMR_SHIFT;
        vcpu_set_reg(vcpu, rt, vmcr);
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        val <<= ICH_VMCR_PMR_SHIFT;
        val &= ICH_VMCR_PMR_MASK;
        vmcr &= ~ICH_VMCR_PMR_MASK;
        vmcr |= val;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = __vgic_v3_get_highest_active_priority();
        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vtr, val;

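        /*
         * Synthesise ICC_CTLR_EL1 from the hardware capabilities reported in
         * ICH_VTR_EL2 and the EOImode/CBPR bits held in ICH_VMCR_EL2.
         */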
        vtr = read_gicreg(ICH_VTR_EL2);
        /* PRIbits */
        val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
        /* IDbits */
        val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
        /* SEIS */
        val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
        /* A3V */
        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
        /* EOImode */
        val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
        /* CBPR */
        val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (val & ICC_CTLR_EL1_CBPR_MASK)
                vmcr |= ICH_VMCR_CBPR_MASK;
        else
                vmcr &= ~ICH_VMCR_CBPR_MASK;

        if (val & ICC_CTLR_EL1_EOImode_MASK)
                vmcr |= ICH_VMCR_EOIM_MASK;
        else
                vmcr &= ~ICH_VMCR_EOIM_MASK;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

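/*
 * Emulate a trapped GICv3 CPU interface access. Returns 1 if the access was
 * handled here (the trapping instruction is then skipped), or 0 to let the
 * usual sysreg exit handling deal with it.
 */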
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        int rt;
        u32 esr;
        u32 vmcr;
        void (*fn)(struct kvm_vcpu *, u32, int);
        bool is_read;
        u32 sysreg;

        esr = kvm_vcpu_get_esr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
                if (!kvm_condition_valid(vcpu)) {
                        __kvm_skip_instr(vcpu);
                        return 1;
                }

                sysreg = esr_cp15_to_sysreg(esr);
        } else {
                sysreg = esr_sys64_to_sysreg(esr);
        }

        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

        switch (sysreg) {
        case SYS_ICC_IAR0_EL1:
        case SYS_ICC_IAR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_iar;
                break;
        case SYS_ICC_EOIR0_EL1:
        case SYS_ICC_EOIR1_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_eoir;
                break;
        case SYS_ICC_IGRPEN1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen1;
                else
                        fn = __vgic_v3_write_igrpen1;
                break;
        case SYS_ICC_BPR1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr1;
                else
                        fn = __vgic_v3_write_bpr1;
                break;
        case SYS_ICC_AP0Rn_EL1(0):
        case SYS_ICC_AP1Rn_EL1(0):
                if (is_read)
                        fn = __vgic_v3_read_apxr0;
                else
                        fn = __vgic_v3_write_apxr0;
                break;
        case SYS_ICC_AP0Rn_EL1(1):
        case SYS_ICC_AP1Rn_EL1(1):
                if (is_read)
                        fn = __vgic_v3_read_apxr1;
                else
                        fn = __vgic_v3_write_apxr1;
                break;
        case SYS_ICC_AP0Rn_EL1(2):
        case SYS_ICC_AP1Rn_EL1(2):
                if (is_read)
                        fn = __vgic_v3_read_apxr2;
                else
                        fn = __vgic_v3_write_apxr2;
                break;
        case SYS_ICC_AP0Rn_EL1(3):
        case SYS_ICC_AP1Rn_EL1(3):
                if (is_read)
                        fn = __vgic_v3_read_apxr3;
                else
                        fn = __vgic_v3_write_apxr3;
                break;
        case SYS_ICC_HPPIR0_EL1:
        case SYS_ICC_HPPIR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_hppir;
                break;
        case SYS_ICC_IGRPEN0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen0;
                else
                        fn = __vgic_v3_write_igrpen0;
                break;
        case SYS_ICC_BPR0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr0;
                else
                        fn = __vgic_v3_write_bpr0;
                break;
        case SYS_ICC_DIR_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_dir;
                break;
        case SYS_ICC_RPR_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_rpr;
                break;
        case SYS_ICC_CTLR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_ctlr;
                else
                        fn = __vgic_v3_write_ctlr;
                break;
        case SYS_ICC_PMR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_pmr;
                else
                        fn = __vgic_v3_write_pmr;
                break;
        default:
                return 0;
        }

        vmcr = __vgic_v3_read_vmcr();
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);

        __kvm_skip_instr(vcpu);

        return 1;
}