kvm: selftests: aarch64: pass vgic_irq guest args as a pointer
index 9f1674b..b701eb8 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -10,6 +10,7 @@
 
 #include <asm/kvm.h>
 #include <asm/kvm_para.h>
+#include <sys/eventfd.h>
 #include <linux/sizes.h>
 
 #include "processor.h"
@@ -31,6 +32,8 @@ struct test_args {
        uint32_t nr_irqs; /* number of KVM supported IRQs. */
        bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
        bool level_sensitive; /* 1 is level, 0 is edge */
+       int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
+       bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
 };
 
 /*
@@ -61,6 +64,9 @@ typedef enum {
        KVM_SET_IRQ_LINE,
        KVM_SET_IRQ_LINE_HIGH,
        KVM_SET_LEVEL_INFO_HIGH,
+       KVM_INJECT_IRQFD,
+       KVM_WRITE_ISPENDR,
+       KVM_WRITE_ISACTIVER,
 } kvm_inject_cmd;
 
 struct kvm_inject_args {
@@ -91,6 +97,9 @@ static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
 #define KVM_INJECT(cmd, intid)                                                 \
        _KVM_INJECT_MULTI(cmd, intid, 1, false)
 
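+/* Ask the host to mark a single intid as active (used with KVM_WRITE_ISACTIVER). */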
+#define KVM_ACTIVATE(cmd, intid)                                               \
+       kvm_inject_call(cmd, intid, 1, 1, false)
+
 struct kvm_inject_desc {
        kvm_inject_cmd cmd;
        /* can inject SGIs, PPIs, and/or SPIs. */
@@ -100,6 +109,8 @@ struct kvm_inject_desc {
 static struct kvm_inject_desc inject_edge_fns[] = {
        /*                                      sgi    ppi    spi */
        { KVM_INJECT_EDGE_IRQ_LINE,             false, false, true },
+       { KVM_INJECT_IRQFD,                     false, false, true },
+       { KVM_WRITE_ISPENDR,                    true,  false, true },
        { 0, },
 };
 
@@ -107,12 +118,27 @@ static struct kvm_inject_desc inject_level_fns[] = {
        /*                                      sgi    ppi    spi */
        { KVM_SET_IRQ_LINE_HIGH,                false, true,  true },
        { KVM_SET_LEVEL_INFO_HIGH,              false, true,  true },
+       { KVM_INJECT_IRQFD,                     false, false, true },
+       { KVM_WRITE_ISPENDR,                    false, true,  true },
+       { 0, },
+};
+
+static struct kvm_inject_desc set_active_fns[] = {
+       /*                                      sgi    ppi    spi */
+       { KVM_WRITE_ISACTIVER,                  true,  true,  true },
        { 0, },
 };
 
 #define for_each_inject_fn(t, f)                                               \
        for ((f) = (t); (f)->cmd; (f)++)
 
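+/*
+ * Iterate over the injection functions, skipping KVM_INJECT_IRQFD when the
+ * host does not advertise KVM_CAP_IRQFD.
+ */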
+#define for_each_supported_inject_fn(args, t, f)                               \
+       for_each_inject_fn(t, f)                                                \
+               if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
+
+#define for_each_supported_activate_fn(args, t, f)                             \
+       for_each_supported_inject_fn((args), (t), (f))
+
 /* Shared between the guest main thread and the IRQ handlers. */
 volatile uint64_t irq_handled;
 volatile uint32_t irqnr_received[MAX_SPI + 1];
@@ -134,6 +160,12 @@ static uint64_t gic_read_ap1r0(void)
        return reg;
 }
 
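+/* Restore the group-1 active priorities by writing ICV_AP1R0_EL1. */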
+static void gic_write_ap1r0(uint64_t val)
+{
+       write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
+       isb();
+}
+
 static void guest_set_irq_line(uint32_t intid, uint32_t level);
 
 static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
@@ -261,6 +293,55 @@ static void guest_inject(struct test_args *args,
        reset_priorities(args);
 }
 
+/*
+ * Restore the active state of num concurrent IRQs starting at first_intid.
+ * This does what a live migration would do on the destination side, assuming
+ * there are some active IRQs that have not been deactivated yet.
+ */
+static void guest_restore_active(struct test_args *args,
+               uint32_t first_intid, uint32_t num,
+               kvm_inject_cmd cmd)
+{
+       uint32_t intid, ap1r;
+       int i, prio;
+
+       /* Set the priorities of the num IRQs in descending order, so that
+        * intid+1 can preempt intid.
+        */
+       for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
+               GUEST_ASSERT(prio >= 0);
+               intid = i + first_intid;
+               gic_set_priority(intid, prio);
+       }
+
+       /* In a real migration, KVM would restore all GIC state before running
+        * guest code.
+        */
+       for (i = 0; i < num; i++) {
+               intid = i + first_intid;
+               KVM_ACTIVATE(cmd, intid);
+               ap1r = gic_read_ap1r0();
+               ap1r |= 1U << i;
+               gic_write_ap1r0(ap1r);
+       }
+
+       /* This is where the "migration" would occur. */
+
+       /* Finish handling the IRQs, starting with the highest priority one. */
+       for (i = 0; i < num; i++) {
+               intid = num - i - 1 + first_intid;
+               gic_set_eoi(intid);
+               if (args->eoi_split)
+                       gic_set_dir(intid);
+       }
+
+       for (i = 0; i < num; i++)
+               GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
+       GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
+       GUEST_ASSERT_IAR_EMPTY();
+}
+
 /*
  * Polls the IAR until it's not a spurious interrupt.
  *
@@ -378,10 +459,23 @@ static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
                test_inject_preemption(args, MIN_SPI, 4, f->cmd);
 }
 
-static void guest_code(struct test_args args)
+static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
+{
+       /* Test up to 4 active IRQs. Same reason as in test_preemption. */
+       if (f->sgi)
+               guest_restore_active(args, MIN_SGI, 4, f->cmd);
+
+       if (f->ppi)
+               guest_restore_active(args, MIN_PPI, 4, f->cmd);
+
+       if (f->spi)
+               guest_restore_active(args, MIN_SPI, 4, f->cmd);
+}
+
+static void guest_code(struct test_args *args)
 {
-       uint32_t i, nr_irqs = args.nr_irqs;
-       bool level_sensitive = args.level_sensitive;
+       uint32_t i, nr_irqs = args->nr_irqs;
+       bool level_sensitive = args->level_sensitive;
        struct kvm_inject_desc *f, *inject_fns;
 
        gic_init(GIC_V3, 1, dist, redist);
@@ -390,11 +484,11 @@ static void guest_code(struct test_args args)
                gic_irq_enable(i);
 
        for (i = MIN_SPI; i < nr_irqs; i++)
-               gic_irq_set_config(i, !args.level_sensitive);
+               gic_irq_set_config(i, !level_sensitive);
 
-       gic_set_eoi_split(args.eoi_split);
+       gic_set_eoi_split(args->eoi_split);
 
-       reset_priorities(&args);
+       reset_priorities(args);
        gic_set_priority_mask(CPU_PRIO_MASK);
 
        inject_fns  = level_sensitive ? inject_level_fns
@@ -403,12 +497,18 @@ static void guest_code(struct test_args args)
        local_irq_enable();
 
        /* Start the tests. */
-       for_each_inject_fn(inject_fns, f) {
-               test_injection(&args, f);
-               test_preemption(&args, f);
-               test_injection_failure(&args, f);
+       for_each_supported_inject_fn(args, inject_fns, f) {
+               test_injection(args, f);
+               test_preemption(args, f);
+               test_injection_failure(args, f);
        }
 
+       /* Restore the active state of IRQs. This mimics a live migration that
+        * happens while some IRQs are in the middle of being handled.
+        */
+       for_each_supported_activate_fn(args, set_active_fns, f)
+               test_restore_active(args, f);
+
        GUEST_DONE();
 }
 
@@ -455,6 +555,102 @@ void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
        }
 }
 
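+/*
+ * Set a GSI routing table with one entry per interrupt i in [intid, intid + num),
+ * using i - MIN_SPI for both the GSI and the irqchip pin, and optionally
+ * expect the KVM_SET_GSI_ROUTING write to fail for out-of-range intids.
+ */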
+static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
+               uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
+               bool expect_failure)
+{
+       struct kvm_irq_routing *routing;
+       int ret;
+       uint64_t i;
+
+       assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
+
+       routing = kvm_gsi_routing_create();
+       for (i = intid; i < (uint64_t)intid + num; i++)
+               kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
+
+       if (!expect_failure) {
+               kvm_gsi_routing_write(vm, routing);
+       } else {
+               ret = _kvm_gsi_routing_write(vm, routing);
+               /* The kernel only rejects routing entries whose irqchip pin
+                * (intid - MIN_SPI) is >= KVM_IRQCHIP_NUM_PINS.
+                */
+               if ((uint64_t)intid + num - 1 - MIN_SPI >= KVM_IRQCHIP_NUM_PINS)
+                       TEST_ASSERT(ret != 0 && errno == EINVAL,
+                               "Bad intid %u did not cause KVM_SET_GSI_ROUTING "
+                               "error: rc: %i errno: %i", intid, ret, errno);
+               else
+                       TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
+                               "for intid %i failed, rc: %i errno: %i",
+                               intid, ret, errno);
+       }
+}
+
+static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
+                       uint32_t vcpu, bool expect_failure)
+{
+       /*
+        * Ignore this when expecting failure, as invalid intids would lead
+        * either to trying to inject SGIs when the test is configured to be
+        * level_sensitive (or the reverse), or to injecting large intids,
+        * which would mean writing above the ISPENDR register space (and we
+        * don't want to do that either).
+        */
+       if (!expect_failure)
+               kvm_irq_write_ispendr(gic_fd, intid, vcpu);
+}
+
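+/*
+ * Inject SPIs from userspace: set up GSI routing for the given intids, bind
+ * one eventfd per SPI with KVM_IRQFD, then write to each eventfd to trigger
+ * the injection.
+ */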
+static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
+               uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
+               bool expect_failure)
+{
+       int fd[MAX_SPI];
+       uint64_t val;
+       int ret, f;
+       uint64_t i;
+
+       /*
+        * There is no way to try injecting an SGI or PPI as the interface
+        * starts counting from the first SPI (above the private ones), so just
+        * exit.
+        */
+       if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
+               return;
+
+       kvm_set_gsi_routing_irqchip_check(vm, intid, num,
+                       kvm_max_routes, expect_failure);
+
+       /*
+        * If expect_failure, then just inject anyway. These injections
+        * will silently fail, and in any case the guest will check
+        * that no actual interrupt was injected for those cases.
+        */
+
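+       /* Create one eventfd per SPI to be injected. */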
+       for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
+               fd[f] = eventfd(0, 0);
+               TEST_ASSERT(fd[f] != -1,
+                       "eventfd failed, errno: %i\n", errno);
+       }
+
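+       /* Bind each eventfd to its GSI (i - MIN_SPI) using KVM_IRQFD. */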
+       for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
+               struct kvm_irqfd irqfd = {
+                       .fd  = fd[f],
+                       .gsi = i - MIN_SPI,
+               };
+               assert(i <= (uint64_t)UINT_MAX);
+               vm_ioctl(vm, KVM_IRQFD, &irqfd);
+       }
+
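+       /* Kick each irqfd by writing to its eventfd. */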
+       for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
+               val = 1;
+               ret = write(fd[f], &val, sizeof(uint64_t));
+               TEST_ASSERT(ret == sizeof(uint64_t),
+                       "Write to the irqfd's eventfd failed, ret: %d\n", ret);
+       }
+
+       for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
+               close(fd[f]);
+}
+
 /* handles the valid case: intid=0xffffffff num=1 */
 #define for_each_intid(first, num, tmp, i)                                     \
        for ((tmp) = (i) = (first);                                             \
@@ -500,6 +696,20 @@ static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
                        kvm_irq_set_level_info_check(gic_fd, i, 1,
                                        expect_failure);
                break;
+       case KVM_INJECT_IRQFD:
+               kvm_routing_and_irqfd_check(vm, intid, num,
+                                       test_args->kvm_max_routes,
+                                       expect_failure);
+               break;
+       case KVM_WRITE_ISPENDR:
+               for (i = intid; i < intid + num; i++)
+                       kvm_irq_write_ispendr_check(gic_fd, i,
+                                       VCPU_ID, expect_failure);
+               break;
+       case KVM_WRITE_ISACTIVER:
+               for (i = intid; i < intid + num; i++)
+                       kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
+               break;
        default:
                break;
        }
@@ -529,11 +739,14 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
        int gic_fd;
        struct kvm_vm *vm;
        struct kvm_inject_args inject_args;
+       vm_vaddr_t args_gva;
 
        struct test_args args = {
                .nr_irqs = nr_irqs,
                .level_sensitive = level_sensitive,
                .eoi_split = eoi_split,
+               .kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
+               .kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
        };
 
        print_args(&args);
@@ -545,7 +758,9 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
        vcpu_init_descriptor_tables(vm, VCPU_ID);
 
        /* Setup the guest args page (so it gets the args). */
-       vcpu_args_set(vm, 0, 1, args);
+       args_gva = vm_vaddr_alloc_page(vm);
+       memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
+       vcpu_args_set(vm, 0, 1, args_gva);
 
        gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
                        GICD_BASE_GPA, GICR_BASE_GPA);