// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
/* Guest-physical addresses of the GIC distributor and redistributor frames. */
#define GICD_BASE_GPA		0x08000000ULL
#define GICR_BASE_GPA		0x080A0000ULL

/*
 * Stores the user specified args; it's passed to the guest and to every test
 * function.
 */
struct test_args {
	uint32_t nr_irqs; /* number of KVM supported IRQs. */
	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
	bool level_sensitive; /* 1 is level, 0 is edge */
	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
};
/*
 * KVM implements 32 priority levels:
 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
 *
 * Note that these macros will still be correct in the case that KVM implements
 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
 */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
54 static void *dist = (void *)GICD_BASE_GPA;
55 static void *redist = (void *)GICR_BASE_GPA;
/*
 * The kvm_inject_* utilities are used by the guest to ask the host to inject
 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
 */
typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,
	KVM_SET_IRQ_LINE,	/* NOTE(review): enumerators after the first reconstructed from uses below — confirm order */
	KVM_SET_IRQ_LINE_HIGH,
	KVM_SET_LEVEL_INFO_HIGH,
	KVM_INJECT_IRQFD,
	KVM_WRITE_ISPENDR,
	KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;
72 struct kvm_inject_args {
80 /* Used on the guest side to perform the hypercall. */
81 static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
82 uint32_t num, int level, bool expect_failure);
84 /* Used on the host side to get the hypercall info. */
85 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
86 struct kvm_inject_args *args);
/* Convenience wrappers around kvm_inject_call(); level is unused for edge IRQs. */
#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)		\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)				\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)				\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)						\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)

/* Restore the active state of a single IRQ (level=1 doubles as "active"). */
#define KVM_ACTIVATE(cmd, intid)					\
	kvm_inject_call(cmd, intid, 1, 1, false);
103 struct kvm_inject_desc {
105 /* can inject PPIs, PPIs, and/or SPIs. */
109 static struct kvm_inject_desc inject_edge_fns[] = {
111 { KVM_INJECT_EDGE_IRQ_LINE, false, false, true },
112 { KVM_INJECT_IRQFD, false, false, true },
113 { KVM_WRITE_ISPENDR, true, false, true },
117 static struct kvm_inject_desc inject_level_fns[] = {
119 { KVM_SET_IRQ_LINE_HIGH, false, true, true },
120 { KVM_SET_LEVEL_INFO_HIGH, false, true, true },
121 { KVM_INJECT_IRQFD, false, false, true },
122 { KVM_WRITE_ISPENDR, false, true, true },
126 static struct kvm_inject_desc set_active_fns[] = {
128 { KVM_WRITE_ISACTIVER, true, true, true },
/* Walk a kvm_inject_desc table until the {0,} sentinel (cmd == 0). */
#define for_each_inject_fn(t, f)					\
	for ((f) = (t); (f)->cmd; (f)++)

/* Same, but skip KVM_INJECT_IRQFD when the host lacks KVM_CAP_IRQFD. */
#define for_each_supported_inject_fn(args, t, f)			\
	for_each_inject_fn(t, f)					\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

#define for_each_supported_activate_fn(args, t, f)			\
	for_each_supported_inject_fn((args), (t), (f))
142 /* Shared between the guest main thread and the IRQ handlers. */
143 volatile uint64_t irq_handled;
144 volatile uint32_t irqnr_received[MAX_SPI + 1];
146 static void reset_stats(void)
151 for (i = 0; i <= MAX_SPI; i++)
152 irqnr_received[i] = 0;
155 static uint64_t gic_read_ap1r0(void)
157 uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
163 static void gic_write_ap1r0(uint64_t val)
165 write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
169 static void guest_set_irq_line(uint32_t intid, uint32_t level);
171 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
173 uint32_t intid = gic_get_and_ack_irq();
175 if (intid == IAR_SPURIOUS)
178 GUEST_ASSERT(gic_irq_get_active(intid));
180 if (!level_sensitive)
181 GUEST_ASSERT(!gic_irq_get_pending(intid));
184 guest_set_irq_line(intid, 0);
186 GUEST_ASSERT(intid < MAX_SPI);
187 irqnr_received[intid] += 1;
191 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
195 GUEST_ASSERT(!gic_irq_get_active(intid));
196 GUEST_ASSERT(!gic_irq_get_pending(intid));
199 static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
200 uint32_t num, int level, bool expect_failure)
202 struct kvm_inject_args args = {
204 .first_intid = first_intid,
207 .expect_failure = expect_failure,
/* Assert that acking the IAR yields nothing (no IRQ left pending+enabled). */
#define GUEST_ASSERT_IAR_EMPTY()					\
do {									\
	uint32_t _intid;						\
	_intid = gic_get_and_ack_irq();					\
	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);		\
} while (0)

#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
/* Stamp out one handler per (eoi_split, level_sensitive) combination. */
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)				\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)		\
{									\
	guest_irq_generic_handler(split, lev);				\
}
229 GENERATE_GUEST_IRQ_HANDLER(0, 0);
230 GENERATE_GUEST_IRQ_HANDLER(0, 1);
231 GENERATE_GUEST_IRQ_HANDLER(1, 0);
232 GENERATE_GUEST_IRQ_HANDLER(1, 1);
234 static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
235 {GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
236 {GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
239 static void reset_priorities(struct test_args *args)
243 for (i = 0; i < args->nr_irqs; i++)
244 gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
247 static void guest_set_irq_line(uint32_t intid, uint32_t level)
249 kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
252 static void test_inject_fail(struct test_args *args,
253 uint32_t intid, kvm_inject_cmd cmd)
257 _KVM_INJECT(cmd, intid, true);
258 /* no IRQ to handle on entry */
260 GUEST_ASSERT_EQ(irq_handled, 0);
261 GUEST_ASSERT_IAR_EMPTY();
264 static void guest_inject(struct test_args *args,
265 uint32_t first_intid, uint32_t num,
272 /* Cycle over all priorities to make things more interesting. */
273 for (i = first_intid; i < num + first_intid; i++)
274 gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
276 asm volatile("msr daifset, #2" : : : "memory");
277 KVM_INJECT_MULTI(cmd, first_intid, num);
279 while (irq_handled < num) {
286 asm volatile("msr daifclr, #2" : : : "memory");
288 GUEST_ASSERT_EQ(irq_handled, num);
289 for (i = first_intid; i < num + first_intid; i++)
290 GUEST_ASSERT_EQ(irqnr_received[i], 1);
291 GUEST_ASSERT_IAR_EMPTY();
293 reset_priorities(args);
297 * Restore the active state of multiple concurrent IRQs (given by
298 * concurrent_irqs). This does what a live-migration would do on the
299 * destination side assuming there are some active IRQs that were not
302 static void guest_restore_active(struct test_args *args,
303 uint32_t first_intid, uint32_t num,
306 uint32_t prio, intid, ap1r;
309 /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
310 * in descending order, so intid+1 can preempt intid.
312 for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
313 GUEST_ASSERT(prio >= 0);
314 intid = i + first_intid;
315 gic_set_priority(intid, prio);
318 /* In a real migration, KVM would restore all GIC state before running
321 for (i = 0; i < num; i++) {
322 intid = i + first_intid;
323 KVM_ACTIVATE(cmd, intid);
324 ap1r = gic_read_ap1r0();
326 gic_write_ap1r0(ap1r);
329 /* This is where the "migration" would occur. */
331 /* finish handling the IRQs starting with the highest priority one. */
332 for (i = 0; i < num; i++) {
333 intid = num - i - 1 + first_intid;
339 for (i = 0; i < num; i++)
340 GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
341 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
342 GUEST_ASSERT_IAR_EMPTY();
346 * Polls the IAR until it's not a spurious interrupt.
348 * This function should only be used in test_inject_preemption (with IRQs
351 static uint32_t wait_for_and_activate_irq(void)
356 asm volatile("wfi" : : : "memory");
357 intid = gic_get_and_ack_irq();
358 } while (intid == IAR_SPURIOUS);
364 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
365 * handle them without handling the actual exceptions. This is done by masking
366 * interrupts for the whole test.
368 static void test_inject_preemption(struct test_args *args,
369 uint32_t first_intid, int num,
372 uint32_t intid, prio, step = KVM_PRIO_STEPS;
375 /* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
376 * in descending order, so intid+1 can preempt intid.
378 for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
379 GUEST_ASSERT(prio >= 0);
380 intid = i + first_intid;
381 gic_set_priority(intid, prio);
386 for (i = 0; i < num; i++) {
388 intid = i + first_intid;
389 KVM_INJECT(cmd, intid);
390 /* Each successive IRQ will preempt the previous one. */
391 tmp = wait_for_and_activate_irq();
392 GUEST_ASSERT_EQ(tmp, intid);
393 if (args->level_sensitive)
394 guest_set_irq_line(intid, 0);
397 /* finish handling the IRQs starting with the highest priority one. */
398 for (i = 0; i < num; i++) {
399 intid = num - i - 1 + first_intid;
407 for (i = 0; i < num; i++)
408 GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
409 GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
410 GUEST_ASSERT_IAR_EMPTY();
412 reset_priorities(args);
415 static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
417 uint32_t nr_irqs = args->nr_irqs;
420 guest_inject(args, MIN_SGI, 1, f->cmd);
421 guest_inject(args, 0, 16, f->cmd);
425 guest_inject(args, MIN_PPI, 1, f->cmd);
428 guest_inject(args, MIN_SPI, 1, f->cmd);
429 guest_inject(args, nr_irqs - 1, 1, f->cmd);
430 guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
434 static void test_injection_failure(struct test_args *args,
435 struct kvm_inject_desc *f)
437 uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
440 for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
441 test_inject_fail(args, bad_intid[i], f->cmd);
444 static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
447 * Test up to 4 levels of preemption. The reason is that KVM doesn't
448 * currently implement the ability to have more than the number-of-LRs
449 * number of concurrently active IRQs. The number of LRs implemented is
450 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
453 test_inject_preemption(args, MIN_SGI, 4, f->cmd);
456 test_inject_preemption(args, MIN_PPI, 4, f->cmd);
459 test_inject_preemption(args, MIN_SPI, 4, f->cmd);
462 static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
464 /* Test up to 4 active IRQs. Same reason as in test_preemption. */
466 guest_restore_active(args, MIN_SGI, 4, f->cmd);
469 guest_restore_active(args, MIN_PPI, 4, f->cmd);
472 guest_restore_active(args, MIN_SPI, 4, f->cmd);
475 static void guest_code(struct test_args args)
477 uint32_t i, nr_irqs = args.nr_irqs;
478 bool level_sensitive = args.level_sensitive;
479 struct kvm_inject_desc *f, *inject_fns;
481 gic_init(GIC_V3, 1, dist, redist);
483 for (i = 0; i < nr_irqs; i++)
486 for (i = MIN_SPI; i < nr_irqs; i++)
487 gic_irq_set_config(i, !args.level_sensitive);
489 gic_set_eoi_split(args.eoi_split);
491 reset_priorities(&args);
492 gic_set_priority_mask(CPU_PRIO_MASK);
494 inject_fns = level_sensitive ? inject_level_fns
499 /* Start the tests. */
500 for_each_supported_inject_fn(&args, inject_fns, f) {
501 test_injection(&args, f);
502 test_preemption(&args, f);
503 test_injection_failure(&args, f);
506 /* Restore the active state of IRQs. This would happen when live
507 * migrating IRQs in the middle of being handled.
509 for_each_supported_activate_fn(&args, set_active_fns, f)
510 test_restore_active(&args, f);
515 static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
516 struct test_args *test_args, bool expect_failure)
520 if (!expect_failure) {
521 kvm_arm_irq_line(vm, intid, level);
523 /* The interface doesn't allow larger intid's. */
524 if (intid > KVM_ARM_IRQ_NUM_MASK)
527 ret = _kvm_arm_irq_line(vm, intid, level);
528 TEST_ASSERT(ret != 0 && errno == EINVAL,
529 "Bad intid %i did not cause KVM_IRQ_LINE "
530 "error: rc: %i errno: %i", intid, ret, errno);
534 void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
537 if (!expect_failure) {
538 kvm_irq_set_level_info(gic_fd, intid, level);
540 int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
542 * The kernel silently fails for invalid SPIs and SGIs (which
543 * are not level-sensitive). It only checks for intid to not
544 * spill over 1U << 10 (the max reserved SPI). Also, callers
545 * are supposed to mask the intid with 0x3ff (1023).
547 if (intid > VGIC_MAX_RESERVED)
548 TEST_ASSERT(ret != 0 && errno == EINVAL,
549 "Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
550 "error: rc: %i errno: %i", intid, ret, errno);
552 TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
553 "for intid %i failed, rc: %i errno: %i",
558 static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
559 uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
562 struct kvm_irq_routing *routing;
566 assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
568 routing = kvm_gsi_routing_create();
569 for (i = intid; i < (uint64_t)intid + num; i++)
570 kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
572 if (!expect_failure) {
573 kvm_gsi_routing_write(vm, routing);
575 ret = _kvm_gsi_routing_write(vm, routing);
576 /* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */
577 if (intid >= KVM_IRQCHIP_NUM_PINS)
578 TEST_ASSERT(ret != 0 && errno == EINVAL,
579 "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
580 "error: rc: %i errno: %i", intid, ret, errno);
582 TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
583 "for intid %i failed, rc: %i errno: %i",
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
			uint32_t vcpu, bool expect_failure)
{
	/*
	 * Ignore this when expecting failure as invalid intids will lead to
	 * either trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or inject large intids which
	 * will lead to writing above the ISPENDR register space (and we
	 * don't want to do that either).
	 */
	if (!expect_failure)
		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}
602 static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
603 uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
612 * There is no way to try injecting an SGI or PPI as the interface
613 * starts counting from the first SPI (above the private ones), so just
616 if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
619 kvm_set_gsi_routing_irqchip_check(vm, intid, num,
620 kvm_max_routes, expect_failure);
623 * If expect_failure, then just to inject anyway. These
624 * will silently fail. And in any case, the guest will check
625 * that no actual interrupt was injected for those cases.
628 for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
629 fd[f] = eventfd(0, 0);
630 TEST_ASSERT(fd[f] != -1,
631 "eventfd failed, errno: %i\n", errno);
634 for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
635 struct kvm_irqfd irqfd = {
639 assert(i <= (uint64_t)UINT_MAX);
640 vm_ioctl(vm, KVM_IRQFD, &irqfd);
643 for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
645 ret = write(fd[f], &val, sizeof(uint64_t));
646 TEST_ASSERT(ret == sizeof(uint64_t),
647 "Write to KVM_IRQFD failed with ret: %d\n", ret);
650 for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
/*
 * Iterate i over [first, first+num); tmp runs in 64 bits so the loop
 * handles the valid case: intid=0xffffffff num=1 without wrapping.
 */
#define for_each_intid(first, num, tmp, i)				\
	for ((tmp) = (i) = (first);					\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);		\
		(tmp)++, (i)++)
660 static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
661 struct kvm_inject_args *inject_args,
662 struct test_args *test_args)
664 kvm_inject_cmd cmd = inject_args->cmd;
665 uint32_t intid = inject_args->first_intid;
666 uint32_t num = inject_args->num;
667 int level = inject_args->level;
668 bool expect_failure = inject_args->expect_failure;
672 /* handles the valid case: intid=0xffffffff num=1 */
673 assert(intid < UINT_MAX - num || num == 1);
676 case KVM_INJECT_EDGE_IRQ_LINE:
677 for_each_intid(intid, num, tmp, i)
678 kvm_irq_line_check(vm, i, 1, test_args,
680 for_each_intid(intid, num, tmp, i)
681 kvm_irq_line_check(vm, i, 0, test_args,
684 case KVM_SET_IRQ_LINE:
685 for_each_intid(intid, num, tmp, i)
686 kvm_irq_line_check(vm, i, level, test_args,
689 case KVM_SET_IRQ_LINE_HIGH:
690 for_each_intid(intid, num, tmp, i)
691 kvm_irq_line_check(vm, i, 1, test_args,
694 case KVM_SET_LEVEL_INFO_HIGH:
695 for_each_intid(intid, num, tmp, i)
696 kvm_irq_set_level_info_check(gic_fd, i, 1,
699 case KVM_INJECT_IRQFD:
700 kvm_routing_and_irqfd_check(vm, intid, num,
701 test_args->kvm_max_routes,
704 case KVM_WRITE_ISPENDR:
705 for (i = intid; i < intid + num; i++)
706 kvm_irq_write_ispendr_check(gic_fd, i,
707 VCPU_ID, expect_failure);
709 case KVM_WRITE_ISACTIVER:
710 for (i = intid; i < intid + num; i++)
711 kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
718 static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
719 struct kvm_inject_args *args)
721 struct kvm_inject_args *kvm_args_hva;
722 vm_vaddr_t kvm_args_gva;
724 kvm_args_gva = uc->args[1];
725 kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
726 memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
729 static void print_args(struct test_args *args)
731 printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
732 args->nr_irqs, args->level_sensitive,
736 static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
741 struct kvm_inject_args inject_args;
743 struct test_args args = {
745 .level_sensitive = level_sensitive,
746 .eoi_split = eoi_split,
747 .kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
748 .kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
753 vm = vm_create_default(VCPU_ID, 0, guest_code);
754 ucall_init(vm, NULL);
756 vm_init_descriptor_tables(vm);
757 vcpu_init_descriptor_tables(vm, VCPU_ID);
759 /* Setup the guest args page (so it gets the args). */
760 vcpu_args_set(vm, 0, 1, args);
762 gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
763 GICD_BASE_GPA, GICR_BASE_GPA);
765 vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
766 guest_irq_handlers[args.eoi_split][args.level_sensitive]);
769 vcpu_run(vm, VCPU_ID);
771 switch (get_ucall(vm, VCPU_ID, &uc)) {
773 kvm_inject_get_call(vm, &uc, &inject_args);
774 run_guest_cmd(vm, gic_fd, &inject_args, &args);
777 TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
778 (const char *)uc.args[0],
779 __FILE__, uc.args[1], uc.args[2], uc.args[3]);
784 TEST_FAIL("Unknown ucall %lu", uc.cmd);
/* Print usage and exit; called on -h or invalid arguments. */
static void help(const char *name)
{
	printf(
	"\n"
	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
	printf(" -n: specify number of IRQs to setup the vgic with. "
		"It has to be a multiple of 32 and between 64 and 1024.\n");
	printf(" -e: if 1 then EOI is split into a write to DIR on top "
		"of writing EOI.\n");
	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
	puts("");
	exit(1);
}
807 int main(int argc, char **argv)
809 uint32_t nr_irqs = 64;
810 bool default_args = true;
811 bool level_sensitive = false;
813 bool eoi_split = false;
815 /* Tell stdout not to buffer its content */
816 setbuf(stdout, NULL);
818 while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
821 nr_irqs = atoi(optarg);
822 if (nr_irqs > 1024 || nr_irqs % 32)
826 eoi_split = (bool)atoi(optarg);
827 default_args = false;
830 level_sensitive = (bool)atoi(optarg);
831 default_args = false;
840 /* If the user just specified nr_irqs and/or gic_version, then run all
844 test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
845 test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
846 test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
847 test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
849 test_vgic(nr_irqs, level_sensitive, eoi_split);