tools/testing/selftests/kvm/aarch64/vgic_irq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * vgic_irq.c - Test userspace injection of IRQs
4  *
5  * This test validates the injection of IRQs from userspace using various
6  * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
7  * host to inject a specific intid via a GUEST_SYNC call, and then checks that
8  * it received it.
9  */
10
11 #include <asm/kvm.h>
12 #include <asm/kvm_para.h>
13 #include <sys/eventfd.h>
14 #include <linux/sizes.h>
15
16 #include "processor.h"
17 #include "test_util.h"
18 #include "kvm_util.h"
19 #include "gic.h"
20 #include "gic_v3.h"
21 #include "vgic.h"
22
23 #define GICD_BASE_GPA           0x08000000ULL
24 #define GICR_BASE_GPA           0x080A0000ULL
25 #define VCPU_ID                 0
26
27 /*
28  * Stores the user specified args; it's passed to the guest and to every test
29  * function.
30  */
31 struct test_args {
32         uint32_t nr_irqs; /* number of KVM supported IRQs. */
33         bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
34         bool level_sensitive; /* 1 is level, 0 is edge */
35         int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
36         bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
37 };
38
39 /*
40  * KVM implements 32 priority levels:
41  * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
42  *
43  * Note that these macros will still be correct in the case that KVM implements
44  * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
45  */
46 #define KVM_NUM_PRIOS           32
47 #define KVM_PRIO_SHIFT          3 /* steps of 8 = 1 << 3 */
48 #define KVM_PRIO_STEPS          (1 << KVM_PRIO_SHIFT) /* 8 */
49 #define LOWEST_PRIO             (KVM_NUM_PRIOS - 1)
50 #define CPU_PRIO_MASK           (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
51 #define IRQ_DEFAULT_PRIO        (LOWEST_PRIO - 1)
52 #define IRQ_DEFAULT_PRIO_REG    (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
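/*
 * Example of the mapping: IRQ_DEFAULT_PRIO (level 30) is programmed as
 * 30 << 3 = 0xf0, which is numerically lower, i.e. higher priority, than
 * the priority mask CPU_PRIO_MASK (0xf8), so IRQs at the default
 * priority get delivered.
 */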
53
54 static void *dist = (void *)GICD_BASE_GPA;
55 static void *redist = (void *)GICR_BASE_GPA;
56
57 /*
58  * The kvm_inject_* utilities are used by the guest to ask the host to inject
59  * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
60  */
61
62 typedef enum {
63         KVM_INJECT_EDGE_IRQ_LINE = 1,
64         KVM_SET_IRQ_LINE,
65         KVM_SET_IRQ_LINE_HIGH,
66         KVM_SET_LEVEL_INFO_HIGH,
67         KVM_INJECT_IRQFD,
68         KVM_WRITE_ISPENDR,
69         KVM_WRITE_ISACTIVER,
70 } kvm_inject_cmd;
71
72 struct kvm_inject_args {
73         kvm_inject_cmd cmd;
74         uint32_t first_intid;
75         uint32_t num;
76         int level;
77         bool expect_failure;
78 };
79
80 /* Used on the guest side to perform the hypercall. */
81 static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
82                 uint32_t num, int level, bool expect_failure);
83
84 /* Used on the host side to get the hypercall info. */
85 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
86                 struct kvm_inject_args *args);
87
88 #define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)                      \
89         kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
90
91 #define KVM_INJECT_MULTI(cmd, intid, num)                                       \
92         _KVM_INJECT_MULTI(cmd, intid, num, false)
93
94 #define _KVM_INJECT(cmd, intid, expect_failure)                                 \
95         _KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
96
97 #define KVM_INJECT(cmd, intid)                                                  \
98         _KVM_INJECT_MULTI(cmd, intid, 1, false)
99
100 #define KVM_ACTIVATE(cmd, intid)                                                \
101         kvm_inject_call(cmd, intid, 1, 1, false);
102
103 struct kvm_inject_desc {
104         kvm_inject_cmd cmd;
105         /* can inject SGIs, PPIs, and/or SPIs. */
106         bool sgi, ppi, spi;
107 };
108
109 static struct kvm_inject_desc inject_edge_fns[] = {
110         /*                                      sgi    ppi    spi */
111         { KVM_INJECT_EDGE_IRQ_LINE,             false, false, true },
112         { KVM_INJECT_IRQFD,                     false, false, true },
113         { KVM_WRITE_ISPENDR,                    true,  false, true },
114         { 0, },
115 };
116
117 static struct kvm_inject_desc inject_level_fns[] = {
118         /*                                      sgi    ppi    spi */
119         { KVM_SET_IRQ_LINE_HIGH,                false, true,  true },
120         { KVM_SET_LEVEL_INFO_HIGH,              false, true,  true },
121         { KVM_INJECT_IRQFD,                     false, false, true },
122         { KVM_WRITE_ISPENDR,                    false, true,  true },
123         { 0, },
124 };
125
126 static struct kvm_inject_desc set_active_fns[] = {
127         /*                                      sgi    ppi    spi */
128         { KVM_WRITE_ISACTIVER,                  true,  true,  true },
129         { 0, },
130 };
131
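/*
 * Iterators over the tables above. The "supported" variants skip
 * KVM_INJECT_IRQFD entries when the host does not advertise
 * KVM_CAP_IRQFD; for_each_supported_activate_fn is currently just an
 * alias of for_each_supported_inject_fn.
 */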
132 #define for_each_inject_fn(t, f)                                                \
133         for ((f) = (t); (f)->cmd; (f)++)
134
135 #define for_each_supported_inject_fn(args, t, f)                                \
136         for_each_inject_fn(t, f)                                                \
137                 if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
138
139 #define for_each_supported_activate_fn(args, t, f)                              \
140         for_each_supported_inject_fn((args), (t), (f))
141
142 /* Shared between the guest main thread and the IRQ handlers. */
143 volatile uint64_t irq_handled;
144 volatile uint32_t irqnr_received[MAX_SPI + 1];
145
146 static void reset_stats(void)
147 {
148         int i;
149
150         irq_handled = 0;
151         for (i = 0; i <= MAX_SPI; i++)
152                 irqnr_received[i] = 0;
153 }
154
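/*
 * Helpers to access ICV_AP1R0_EL1 (group-1 active priorities). The tests
 * read it to check that no priority bits are left active, and write it in
 * guest_restore_active() to manually restore the active-priority state.
 */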
155 static uint64_t gic_read_ap1r0(void)
156 {
157         uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
158
159         dsb(sy);
160         return reg;
161 }
162
163 static void gic_write_ap1r0(uint64_t val)
164 {
165         write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
166         isb();
167 }
168
169 static void guest_set_irq_line(uint32_t intid, uint32_t level);
170
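/*
 * Generic handler: ack the IRQ, sanity-check its active/pending state,
 * lower the line for level-sensitive IRQs, update the stats, and
 * complete it with an EOI (plus a DIR write when using split EOI).
 */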
171 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
172 {
173         uint32_t intid = gic_get_and_ack_irq();
174
175         if (intid == IAR_SPURIOUS)
176                 return;
177
178         GUEST_ASSERT(gic_irq_get_active(intid));
179
180         if (!level_sensitive)
181                 GUEST_ASSERT(!gic_irq_get_pending(intid));
182
183         if (level_sensitive)
184                 guest_set_irq_line(intid, 0);
185
186         GUEST_ASSERT(intid < MAX_SPI);
187         irqnr_received[intid] += 1;
188         irq_handled += 1;
189
190         gic_set_eoi(intid);
191         GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
192         if (eoi_split)
193                 gic_set_dir(intid);
194
195         GUEST_ASSERT(!gic_irq_get_active(intid));
196         GUEST_ASSERT(!gic_irq_get_pending(intid));
197 }
198
199 static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
200                 uint32_t num, int level, bool expect_failure)
201 {
202         struct kvm_inject_args args = {
203                 .cmd = cmd,
204                 .first_intid = first_intid,
205                 .num = num,
206                 .level = level,
207                 .expect_failure = expect_failure,
208         };
209         GUEST_SYNC(&args);
210 }
211
212 #define GUEST_ASSERT_IAR_EMPTY()                                                \
213 do {                                                                            \
214         uint32_t _intid;                                                        \
215         _intid = gic_get_and_ack_irq();                                         \
216         GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);                    \
217 } while (0)
218
219 #define CAT_HELPER(a, b) a ## b
220 #define CAT(a, b) CAT_HELPER(a, b)
221 #define PREFIX guest_irq_handler_
222 #define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
223 #define GENERATE_GUEST_IRQ_HANDLER(split, lev)                                  \
224 static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)                  \
225 {                                                                               \
226         guest_irq_generic_handler(split, lev);                                  \
227 }
228
229 GENERATE_GUEST_IRQ_HANDLER(0, 0);
230 GENERATE_GUEST_IRQ_HANDLER(0, 1);
231 GENERATE_GUEST_IRQ_HANDLER(1, 0);
232 GENERATE_GUEST_IRQ_HANDLER(1, 1);
233
234 static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
235         {GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
236         {GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
237 };
238
239 static void reset_priorities(struct test_args *args)
240 {
241         int i;
242
243         for (i = 0; i < args->nr_irqs; i++)
244                 gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
245 }
246
247 static void guest_set_irq_line(uint32_t intid, uint32_t level)
248 {
249         kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
250 }
251
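/*
 * Ask the host for an injection that is expected to fail and check that
 * no interrupt actually arrives in the guest.
 */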
252 static void test_inject_fail(struct test_args *args,
253                 uint32_t intid, kvm_inject_cmd cmd)
254 {
255         reset_stats();
256
257         _KVM_INJECT(cmd, intid, true);
258         /* no IRQ to handle on entry */
259
260         GUEST_ASSERT_EQ(irq_handled, 0);
261         GUEST_ASSERT_IAR_EMPTY();
262 }
263
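/*
 * Ask the host to inject num IRQs starting at first_intid with the given
 * command, handle them one at a time with IRQs masked (wfi, then briefly
 * unmask), and check that every intid was received exactly once.
 */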
264 static void guest_inject(struct test_args *args,
265                 uint32_t first_intid, uint32_t num,
266                 kvm_inject_cmd cmd)
267 {
268         uint32_t i;
269
270         reset_stats();
271
272         /* Cycle over all priorities to make things more interesting. */
273         for (i = first_intid; i < num + first_intid; i++)
274                 gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
275
276         asm volatile("msr daifset, #2" : : : "memory");
277         KVM_INJECT_MULTI(cmd, first_intid, num);
278
279         while (irq_handled < num) {
280                 asm volatile("wfi\n"
281                              "msr daifclr, #2\n"
282                              /* handle IRQ */
283                              "msr daifset, #2\n"
284                              : : : "memory");
285         }
286         asm volatile("msr daifclr, #2" : : : "memory");
287
288         GUEST_ASSERT_EQ(irq_handled, num);
289         for (i = first_intid; i < num + first_intid; i++)
290                 GUEST_ASSERT_EQ(irqnr_received[i], 1);
291         GUEST_ASSERT_IAR_EMPTY();
292
293         reset_priorities(args);
294 }
295
296 /*
297  * Restore the active state of multiple concurrent IRQs (num IRQs
298  * starting at first_intid).  This is what a live migration would do on
299  * the destination side, assuming there are some active IRQs that were
300  * not deactivated yet.
301  */
302 static void guest_restore_active(struct test_args *args,
303                 uint32_t first_intid, uint32_t num,
304                 kvm_inject_cmd cmd)
305 {
306         uint32_t prio, intid, ap1r;
307         int i;
308
309         /* Set the priorities of the num IRQs in descending order, so
310          * intid + 1 can preempt intid.
311          */
312         for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
313                 GUEST_ASSERT(prio >= 0);
314                 intid = i + first_intid;
315                 gic_set_priority(intid, prio);
316         }
317
318         /* In a real migration, KVM would restore all GIC state before running
319          * guest code.
320          */
321         for (i = 0; i < num; i++) {
322                 intid = i + first_intid;
323                 KVM_ACTIVATE(cmd, intid);
324                 ap1r = gic_read_ap1r0();
325                 ap1r |= 1U << i;
326                 gic_write_ap1r0(ap1r);
327         }
328
329         /* This is where the "migration" would occur. */
330
331         /* finish handling the IRQs starting with the highest priority one. */
332         for (i = 0; i < num; i++) {
333                 intid = num - i - 1 + first_intid;
334                 gic_set_eoi(intid);
335                 if (args->eoi_split)
336                         gic_set_dir(intid);
337         }
338
339         for (i = 0; i < num; i++)
340                 GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
341         GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
342         GUEST_ASSERT_IAR_EMPTY();
343 }
344
345 /*
346  * Polls the IAR until it's not a spurious interrupt.
347  *
348  * This function should only be used in test_inject_preemption (with IRQs
349  * masked).
350  */
351 static uint32_t wait_for_and_activate_irq(void)
352 {
353         uint32_t intid;
354
355         do {
356                 asm volatile("wfi" : : : "memory");
357                 intid = gic_get_and_ack_irq();
358         } while (intid == IAR_SPURIOUS);
359
360         return intid;
361 }
362
363 /*
364  * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
365  * handle them without handling the actual exceptions.  This is done by masking
366  * interrupts for the whole test.
367  */
368 static void test_inject_preemption(struct test_args *args,
369                 uint32_t first_intid, int num,
370                 kvm_inject_cmd cmd)
371 {
372         uint32_t intid, prio, step = KVM_PRIO_STEPS;
373         int i;
374
375         /* Set the priorities of the num IRQs in descending order, so
376          * intid + 1 can preempt intid.
377          */
378         for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
379                 GUEST_ASSERT(prio >= 0);
380                 intid = i + first_intid;
381                 gic_set_priority(intid, prio);
382         }
383
384         local_irq_disable();
385
386         for (i = 0; i < num; i++) {
387                 uint32_t tmp;
388                 intid = i + first_intid;
389                 KVM_INJECT(cmd, intid);
390                 /* Each successive IRQ will preempt the previous one. */
391                 tmp = wait_for_and_activate_irq();
392                 GUEST_ASSERT_EQ(tmp, intid);
393                 if (args->level_sensitive)
394                         guest_set_irq_line(intid, 0);
395         }
396
397         /* finish handling the IRQs starting with the highest priority one. */
398         for (i = 0; i < num; i++) {
399                 intid = num - i - 1 + first_intid;
400                 gic_set_eoi(intid);
401                 if (args->eoi_split)
402                         gic_set_dir(intid);
403         }
404
405         local_irq_enable();
406
407         for (i = 0; i < num; i++)
408                 GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
409         GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
410         GUEST_ASSERT_IAR_EMPTY();
411
412         reset_priorities(args);
413 }
414
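/*
 * Exercise one injection method over every intid class it supports: a
 * single SGI and the whole SGI range, a single PPI, and a single, the
 * last, and the full range of SPIs.
 */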
415 static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
416 {
417         uint32_t nr_irqs = args->nr_irqs;
418
419         if (f->sgi) {
420                 guest_inject(args, MIN_SGI, 1, f->cmd);
421                 guest_inject(args, 0, 16, f->cmd);
422         }
423
424         if (f->ppi)
425                 guest_inject(args, MIN_PPI, 1, f->cmd);
426
427         if (f->spi) {
428                 guest_inject(args, MIN_SPI, 1, f->cmd);
429                 guest_inject(args, nr_irqs - 1, 1, f->cmd);
430                 guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
431         }
432 }
433
434 static void test_injection_failure(struct test_args *args,
435                 struct kvm_inject_desc *f)
436 {
437         uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
438         int i;
439
440         for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
441                 test_inject_fail(args, bad_intid[i], f->cmd);
442 }
443
444 static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
445 {
446         /*
447          * Test up to 4 levels of preemption. The reason is that KVM doesn't
448          * currently support more concurrently active IRQs than the number
449          * of implemented LRs. The number of LRs is IMPLEMENTATION DEFINED;
450          * however, most implementations seem to provide 4.
451          */
452         if (f->sgi)
453                 test_inject_preemption(args, MIN_SGI, 4, f->cmd);
454
455         if (f->ppi)
456                 test_inject_preemption(args, MIN_PPI, 4, f->cmd);
457
458         if (f->spi)
459                 test_inject_preemption(args, MIN_SPI, 4, f->cmd);
460 }
461
462 static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
463 {
464         /* Test up to 4 active IRQs. Same reason as in test_preemption. */
465         if (f->sgi)
466                 guest_restore_active(args, MIN_SGI, 4, f->cmd);
467
468         if (f->ppi)
469                 guest_restore_active(args, MIN_PPI, 4, f->cmd);
470
471         if (f->spi)
472                 guest_restore_active(args, MIN_SPI, 4, f->cmd);
473 }
474
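/*
 * Guest entry point: initialize the GICv3, enable and configure all IRQs
 * (edge or level for the SPIs), select the EOI mode, then run the
 * injection, preemption and failure tests for every supported method,
 * followed by the restore-active tests.
 */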
475 static void guest_code(struct test_args args)
476 {
477         uint32_t i, nr_irqs = args.nr_irqs;
478         bool level_sensitive = args.level_sensitive;
479         struct kvm_inject_desc *f, *inject_fns;
480
481         gic_init(GIC_V3, 1, dist, redist);
482
483         for (i = 0; i < nr_irqs; i++)
484                 gic_irq_enable(i);
485
486         for (i = MIN_SPI; i < nr_irqs; i++)
487                 gic_irq_set_config(i, !args.level_sensitive);
488
489         gic_set_eoi_split(args.eoi_split);
490
491         reset_priorities(&args);
492         gic_set_priority_mask(CPU_PRIO_MASK);
493
494         inject_fns  = level_sensitive ? inject_level_fns
495                                       : inject_edge_fns;
496
497         local_irq_enable();
498
499         /* Start the tests. */
500         for_each_supported_inject_fn(&args, inject_fns, f) {
501                 test_injection(&args, f);
502                 test_preemption(&args, f);
503                 test_injection_failure(&args, f);
504         }
505
506         /* Restore the active state of IRQs. This is what would happen when
507          * live-migrating a guest with IRQs in the middle of being handled.
508          */
509         for_each_supported_activate_fn(&args, set_active_fns, f)
510                 test_restore_active(&args, f);
511
512         GUEST_DONE();
513 }
514
515 static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
516                         struct test_args *test_args, bool expect_failure)
517 {
518         int ret;
519
520         if (!expect_failure) {
521                 kvm_arm_irq_line(vm, intid, level);
522         } else {
523                 /* The interface doesn't allow larger intids. */
524                 if (intid > KVM_ARM_IRQ_NUM_MASK)
525                         return;
526
527                 ret = _kvm_arm_irq_line(vm, intid, level);
528                 TEST_ASSERT(ret != 0 && errno == EINVAL,
529                                 "Bad intid %i did not cause KVM_IRQ_LINE "
530                                 "error: rc: %i errno: %i", intid, ret, errno);
531         }
532 }
533
534 void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
535                         bool expect_failure)
536 {
537         if (!expect_failure) {
538                 kvm_irq_set_level_info(gic_fd, intid, level);
539         } else {
540                 int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
541                 /*
542                  * The kernel silently fails for invalid SPIs and SGIs (which
543                  * are not level-sensitive). It only checks that the intid does
544                  * not spill over 1U << 10 (the max reserved SPI). Also, callers
545                  * are supposed to mask the intid with 0x3ff (1023).
546                  */
547                 if (intid > VGIC_MAX_RESERVED)
548                         TEST_ASSERT(ret != 0 && errno == EINVAL,
549                                 "Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
550                                 "error: rc: %i errno: %i", intid, ret, errno);
551                 else
552                         TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
553                                 "for intid %i failed, rc: %i errno: %i",
554                                 intid, ret, errno);
555         }
556 }
557
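/*
 * Build a routing table with one irqchip entry per intid in
 * [intid, intid + num), each using GSI/pin intid - MIN_SPI, and write it
 * with KVM_SET_GSI_ROUTING. When expect_failure is set, intids at or
 * above KVM_IRQCHIP_NUM_PINS must be rejected with EINVAL.
 */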
558 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
559                 uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
560                 bool expect_failure)
561 {
562         struct kvm_irq_routing *routing;
563         int ret;
564         uint64_t i;
565
566         assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
567
568         routing = kvm_gsi_routing_create();
569         for (i = intid; i < (uint64_t)intid + num; i++)
570                 kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
571
572         if (!expect_failure) {
573                 kvm_gsi_routing_write(vm, routing);
574         } else {
575                 ret = _kvm_gsi_routing_write(vm, routing);
576                 /* The kernel only checks the intid against KVM_IRQCHIP_NUM_PINS. */
577                 if (intid >= KVM_IRQCHIP_NUM_PINS)
578                         TEST_ASSERT(ret != 0 && errno == EINVAL,
579                                 "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
580                                 "error: rc: %i errno: %i", intid, ret, errno);
581                 else
582                         TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
583                                 "for intid %i failed, rc: %i errno: %i",
584                                 intid, ret, errno);
585         }
586 }
587
588 static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
589                         uint32_t vcpu, bool expect_failure)
590 {
591         /*
592          * Ignore this when expecting failure as invalid intids will lead to
593          * either trying to inject SGIs when we configured the test to be
594          * level_sensitive (or the reverse), or to injecting large intids,
595          * which would write above the ISPENDR register space (and we
596          * don't want to do that either).
597          */
598         if (!expect_failure)
599                 kvm_irq_write_ispendr(gic_fd, intid, vcpu);
600 }
601
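/*
 * Inject SPIs via irqfd: set up GSI routing for [intid, intid + num),
 * bind one eventfd per route with KVM_IRQFD (gsi = intid - MIN_SPI), and
 * trigger each interrupt with a write to its eventfd.
 */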
602 static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
603                 uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
604                 bool expect_failure)
605 {
606         int fd[MAX_SPI];
607         uint64_t val;
608         int ret, f;
609         uint64_t i;
610
611         /*
612          * There is no way to try injecting an SGI or PPI as the interface
613          * starts counting from the first SPI (above the private ones), so just
614          * exit.
615          */
616         if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
617                 return;
618
619         kvm_set_gsi_routing_irqchip_check(vm, intid, num,
620                         kvm_max_routes, expect_failure);
621
622         /*
623          * If expect_failure, then just try to inject anyway. These
624          * injections will silently fail, and in any case the guest will
625          * check that no actual interrupt was injected for those cases.
626          */
627
628         for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
629                 fd[f] = eventfd(0, 0);
630                 TEST_ASSERT(fd[f] != -1,
631                         "eventfd failed, errno: %i\n", errno);
632         }
633
634         for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
635                 struct kvm_irqfd irqfd = {
636                         .fd  = fd[f],
637                         .gsi = i - MIN_SPI,
638                 };
639                 assert(i <= (uint64_t)UINT_MAX);
640                 vm_ioctl(vm, KVM_IRQFD, &irqfd);
641         }
642
643         for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
644                 val = 1;
645                 ret = write(fd[f], &val, sizeof(uint64_t));
646                 TEST_ASSERT(ret == sizeof(uint64_t),
647                         "Write to KVM_IRQFD failed with ret: %d\n", ret);
648         }
649
650         for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
651                 close(fd[f]);
652 }
653
654 /* handles the valid case: intid=0xffffffff num=1 (tmp is 64-bit to avoid wrap-around) */
655 #define for_each_intid(first, num, tmp, i)                                      \
656         for ((tmp) = (i) = (first);                                             \
657                 (tmp) < (uint64_t)(first) + (uint64_t)(num);                    \
658                 (tmp)++, (i)++)
659
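/*
 * Dispatch a guest request (received via GUEST_SYNC) to the matching
 * userspace injection mechanism, passing along the expect_failure flag
 * where applicable.
 */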
660 static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
661                 struct kvm_inject_args *inject_args,
662                 struct test_args *test_args)
663 {
664         kvm_inject_cmd cmd = inject_args->cmd;
665         uint32_t intid = inject_args->first_intid;
666         uint32_t num = inject_args->num;
667         int level = inject_args->level;
668         bool expect_failure = inject_args->expect_failure;
669         uint64_t tmp;
670         uint32_t i;
671
672         /* handles the valid case: intid=0xffffffff num=1 */
673         assert(intid < UINT_MAX - num || num == 1);
674
675         switch (cmd) {
676         case KVM_INJECT_EDGE_IRQ_LINE:
677                 for_each_intid(intid, num, tmp, i)
678                         kvm_irq_line_check(vm, i, 1, test_args,
679                                         expect_failure);
680                 for_each_intid(intid, num, tmp, i)
681                         kvm_irq_line_check(vm, i, 0, test_args,
682                                         expect_failure);
683                 break;
684         case KVM_SET_IRQ_LINE:
685                 for_each_intid(intid, num, tmp, i)
686                         kvm_irq_line_check(vm, i, level, test_args,
687                                         expect_failure);
688                 break;
689         case KVM_SET_IRQ_LINE_HIGH:
690                 for_each_intid(intid, num, tmp, i)
691                         kvm_irq_line_check(vm, i, 1, test_args,
692                                         expect_failure);
693                 break;
694         case KVM_SET_LEVEL_INFO_HIGH:
695                 for_each_intid(intid, num, tmp, i)
696                         kvm_irq_set_level_info_check(gic_fd, i, 1,
697                                         expect_failure);
698                 break;
699         case KVM_INJECT_IRQFD:
700                 kvm_routing_and_irqfd_check(vm, intid, num,
701                                         test_args->kvm_max_routes,
702                                         expect_failure);
703                 break;
704         case KVM_WRITE_ISPENDR:
705                 for (i = intid; i < intid + num; i++)
706                         kvm_irq_write_ispendr_check(gic_fd, i,
707                                         VCPU_ID, expect_failure);
708                 break;
709         case KVM_WRITE_ISACTIVER:
710                 for (i = intid; i < intid + num; i++)
711                         kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
712                 break;
713         default:
714                 break;
715         }
716 }
717
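/*
 * uc->args[1] holds the guest virtual address of the kvm_inject_args
 * struct passed to GUEST_SYNC; translate it and copy the arguments out
 * of guest memory.
 */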
718 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
719                 struct kvm_inject_args *args)
720 {
721         struct kvm_inject_args *kvm_args_hva;
722         vm_vaddr_t kvm_args_gva;
723
724         kvm_args_gva = uc->args[1];
725         kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
726         memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
727 }
728
729 static void print_args(struct test_args *args)
730 {
731         printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
732                         args->nr_irqs, args->level_sensitive,
733                         args->eoi_split);
734 }
735
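/*
 * Create a VM with a GICv3, install the IRQ handler matching the
 * requested EOI and trigger modes, and run guest_code, servicing its
 * injection requests until it signals GUEST_DONE.
 */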
736 static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
737 {
738         struct ucall uc;
739         int gic_fd;
740         struct kvm_vm *vm;
741         struct kvm_inject_args inject_args;
742
743         struct test_args args = {
744                 .nr_irqs = nr_irqs,
745                 .level_sensitive = level_sensitive,
746                 .eoi_split = eoi_split,
747                 .kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
748                 .kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
749         };
750
751         print_args(&args);
752
753         vm = vm_create_default(VCPU_ID, 0, guest_code);
754         ucall_init(vm, NULL);
755
756         vm_init_descriptor_tables(vm);
757         vcpu_init_descriptor_tables(vm, VCPU_ID);
758
759         /* Set up the guest args page (so the guest gets the args). */
760         vcpu_args_set(vm, 0, 1, args);
761
762         gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
763                         GICD_BASE_GPA, GICR_BASE_GPA);
764
765         vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
766                 guest_irq_handlers[args.eoi_split][args.level_sensitive]);
767
768         while (1) {
769                 vcpu_run(vm, VCPU_ID);
770
771                 switch (get_ucall(vm, VCPU_ID, &uc)) {
772                 case UCALL_SYNC:
773                         kvm_inject_get_call(vm, &uc, &inject_args);
774                         run_guest_cmd(vm, gic_fd, &inject_args, &args);
775                         break;
776                 case UCALL_ABORT:
777                         TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
778                                         (const char *)uc.args[0],
779                                         __FILE__, uc.args[1], uc.args[2], uc.args[3]);
780                         break;
781                 case UCALL_DONE:
782                         goto done;
783                 default:
784                         TEST_FAIL("Unknown ucall %lu", uc.cmd);
785                 }
786         }
787
788 done:
789         close(gic_fd);
790         kvm_vm_free(vm);
791 }
792
793 static void help(const char *name)
794 {
795         printf(
796         "\n"
797         "usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
798         printf(" -n: specify number of IRQs to set up the vgic with. "
799                 "It has to be a multiple of 32 and between 64 and 1024.\n");
800         printf(" -e: if 1 then EOI is split into a write to DIR on top "
801                 "of writing EOI.\n");
802         printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
803         puts("");
804         exit(1);
805 }
806
807 int main(int argc, char **argv)
808 {
809         uint32_t nr_irqs = 64;
810         bool default_args = true;
811         bool level_sensitive = false;
812         int opt;
813         bool eoi_split = false;
814
815         /* Tell stdout not to buffer its content */
816         setbuf(stdout, NULL);
817
818         while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
819                 switch (opt) {
820                 case 'n':
821                         nr_irqs = atoi(optarg);
822                         if (nr_irqs > 1024 || nr_irqs % 32)
823                                 help(argv[0]);
824                         break;
825                 case 'e':
826                         eoi_split = (bool)atoi(optarg);
827                         default_args = false;
828                         break;
829                 case 'l':
830                         level_sensitive = (bool)atoi(optarg);
831                         default_args = false;
832                         break;
833                 case 'h':
834                 default:
835                         help(argv[0]);
836                         break;
837                 }
838         }
839
840         /* If the user just specified nr_irqs (or nothing at all), then run
841          * all combinations of eoi_split and level_sensitive.
842          */
843         if (default_args) {
844                 test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
845                 test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
846                 test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
847                 test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
848         } else {
849                 test_vgic(nr_irqs, level_sensitive, eoi_split);
850         }
851
852         return 0;
853 }