irqchip/gic-v4: Don't allow a VMOVP on a dying VPE
author		Marc Zyngier <maz@kernel.org>
		Wed, 2 Oct 2024 20:49:59 +0000 (21:49 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
		Tue, 8 Oct 2024 15:44:27 +0000 (17:44 +0200)
Kunkun Jiang reported that there is a small window of opportunity for
userspace to force a change of affinity for a VPE while the VPE has already
been unmapped, but the corresponding doorbell interrupt is still visible
in /proc/irq/.
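
Roughly, the interleaving looks like this (an illustrative sketch: the
entry points are the driver's, but the call chain is abridged and <N>
stands for the doorbell's Linux interrupt number):

    CPU 0 (VPE teardown)                CPU 1 (userspace)
    --------------------                -----------------
    VMAPP (V=0) issued,                 write to /proc/irq/<N>/smp_affinity
    vmapp_count drops to 0
                                        its_vpe_set_affinity()
                                        VMOVP on an unmapped VPE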

Plug the race by checking the value of vmapp_count, which tracks whether
the VPE is mapped or not, and by returning an error when it isn't.
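
In terms of the hunks below, the counting scheme boils down to this
(abridged: the real code reaches the VPE via desc->its_vmapp_cmd.vpe):

    map (VMAPP, V=1):    alloc = !atomic_fetch_inc(&vpe->vmapp_count);
    unmap (VMAPP, V=0):  alloc = !atomic_dec_return(&vpe->vmapp_count);
    VMOVP gate:          if (!atomic_read(&vpe->vmapp_count))
                                 return -EINVAL;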

This involves making vmapp_count common to both GICv4.1 and its v4.0
ancestor.
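
The resulting layout, roughly (abridged from the header hunk at the end;
only the moved field is spelled out, everything else is elided):

    struct its_vpe {
            ...
            union {
                    /* GICv4.0 state, incl. vpe_proxy_event */
                    ...
                    /* GICv4.1 state, incl. sgi_config[16] */
                    ...
            };

            /* Track the VPE being mapped, common to v4.0 and v4.1 */
            atomic_t vmapp_count;
            ...
    };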

Fixes: 64edfaa9a234 ("irqchip/gic-v4.1: Implement the v4.1 flavour of VMAPP")
Reported-by: Kunkun Jiang <jiangkunkun@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/c182ece6-2ba0-ce4f-3404-dba7a3ab6c52@huawei.com
Link: https://lore.kernel.org/all/20241002204959.2051709-1-maz@kernel.org
drivers/irqchip/irq-gic-v3-its.c
include/linux/irqchip/arm-gic-v4.h

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index fdec478..ab597e7 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -797,8 +797,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
        its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
 
        if (!desc->its_vmapp_cmd.valid) {
+               alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
                if (is_v4_1(its)) {
-                       alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
                        its_encode_alloc(cmd, alloc);
                        /*
                         * Unmapping a VPE is self-synchronizing on GICv4.1,
@@ -817,13 +817,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
        its_encode_vpt_addr(cmd, vpt_addr);
        its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
+       alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
        if (!is_v4_1(its))
                goto out;
 
        vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
 
-       alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
-
        its_encode_alloc(cmd, alloc);
 
        /*
@@ -3806,6 +3806,13 @@ static int its_vpe_set_affinity(struct irq_data *d,
        struct cpumask *table_mask;
        unsigned long flags;
 
+       /*
+        * Check if we're racing against a VPE being destroyed, for
+        * which we don't want to allow a VMOVP.
+        */
+       if (!atomic_read(&vpe->vmapp_count))
+               return -EINVAL;
+
        /*
         * Changing affinity is mega expensive, so let's be as lazy as
         * we can and only do it if we really have to. Also, if mapped
@@ -4463,9 +4470,8 @@ static int its_vpe_init(struct its_vpe *vpe)
        raw_spin_lock_init(&vpe->vpe_lock);
        vpe->vpe_id = vpe_id;
        vpe->vpt_page = vpt_page;
-       if (gic_rdists->has_rvpeid)
-               atomic_set(&vpe->vmapp_count, 0);
-       else
+       atomic_set(&vpe->vmapp_count, 0);
+       if (!gic_rdists->has_rvpeid)
                vpe->vpe_proxy_event = -1;
 
        return 0;
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index ecabed6..7f1f11a 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -66,10 +66,12 @@ struct its_vpe {
                                bool    enabled;
                                bool    group;
                        }                       sgi_config[16];
-                       atomic_t vmapp_count;
                };
        };
 
+       /* Track the VPE being mapped */
+       atomic_t vmapp_count;
+
        /*
         * Ensures mutual exclusion between affinity setting of the
         * vPE and vLPI operations using vpe->col_idx.