     its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
 
     if (!desc->its_vmapp_cmd.valid) {
+        alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
         if (is_v4_1(its)) {
-            alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
             its_encode_alloc(cmd, alloc);
             /*
              * Unmapping a VPE is self-synchronizing on GICv4.1,
@@ ... @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
     its_encode_vpt_addr(cmd, vpt_addr);
     its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
+    alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
     if (!is_v4_1(its))
         goto out;
 
     vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
 
-    alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
-
     its_encode_alloc(cmd, alloc);
 
     /*
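Taken together, the two hunks above make its_build_vmapp_cmd() maintain vmapp_count on every ITS revision instead of only on GICv4.1: the map path now increments the count before the !is_v4_1() early exit, and the unmap path decrements it before the is_v4_1() check, while the use of the resulting 'alloc' value stays v4.1-specific. As a minimal sketch of the counter semantics only, using C11 atomics in place of the kernel's atomic_t (the kernel's atomic_fetch_inc() returns the pre-increment value, atomic_dec_return() the post-decrement one; names here are illustrative):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

int main(void)
{
    atomic_int vmapp_count;
    bool alloc;

    atomic_init(&vmapp_count, 0);

    /* Map path: only the first VMAPP sees a zero old value,
     * so only it reports alloc == true. */
    alloc = !atomic_fetch_add(&vmapp_count, 1);    /* 0 -> 1 */
    assert(alloc);
    alloc = !atomic_fetch_add(&vmapp_count, 1);    /* 1 -> 2 */
    assert(!alloc);

    /* Unmap path: only the last VMAPP(V=0) drives the count
     * back to zero, so only it reports alloc == true. */
    alloc = !(atomic_fetch_sub(&vmapp_count, 1) - 1);    /* 2 -> 1 */
    assert(!alloc);
    alloc = !(atomic_fetch_sub(&vmapp_count, 1) - 1);    /* 1 -> 0 */
    assert(alloc);

    return 0;
}

The asymmetry is deliberate: only the first map and the last unmap report alloc == true, which is when the v4.1 encoding of the Alloc bit matters.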
@@ ... @@ static int its_vpe_set_affinity(struct irq_data *d,
     struct cpumask *table_mask;
     unsigned long flags;
 
+    /*
+     * Check if we're racing against a VPE being destroyed, for
+     * which we don't want to allow a VMOVP.
+     */
+    if (!atomic_read(&vpe->vmapp_count))
+        return -EINVAL;
+
     /*
      * Changing affinity is mega expensive, so let's be as lazy as
      * we can and only do it if we really have to. Also, if mapped
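This hunk is what actually closes the race: once the last VMAPP(V=0) has dropped vmapp_count to zero, its_vpe_set_affinity() refuses to issue a VMOVP for the dying vPE. A trimmed-down, hypothetical model of the guard (the names and the plain -1 return are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

struct vpe_model {
    atomic_int vmapp_count;
};

/* Mirrors the new check: no VMOVP once the vPE is unmapped. */
static int set_affinity_model(struct vpe_model *vpe)
{
    if (!atomic_load(&vpe->vmapp_count))
        return -1;    /* the kernel returns -EINVAL here */

    /* ... build and issue the VMOVP command ... */
    return 0;
}

int main(void)
{
    struct vpe_model vpe = { .vmapp_count = 1 };

    printf("mapped: %d\n", set_affinity_model(&vpe));    /* 0, allowed */
    atomic_fetch_sub(&vpe.vmapp_count, 1);               /* last unmap */
    printf("dying:  %d\n", set_affinity_model(&vpe));    /* -1, rejected */
    return 0;
}

On its own, atomic_read() is only a snapshot; the series appears to rely on VMAPP and VMOVP being issued with the vPE lock held, so that the count cannot change under a lock holder.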
@@ ... @@ static int its_vpe_init(struct its_vpe *vpe)
     raw_spin_lock_init(&vpe->vpe_lock);
     vpe->vpe_id = vpe_id;
     vpe->vpt_page = vpt_page;
-    if (gic_rdists->has_rvpeid)
-        atomic_set(&vpe->vmapp_count, 0);
-    else
+    atomic_set(&vpe->vmapp_count, 0);
+    if (!gic_rdists->has_rvpeid)
         vpe->vpe_proxy_event = -1;
 
     return 0;
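Finally, its_vpe_init() now zeroes vmapp_count unconditionally, and only vpe_proxy_event remains GICv4.0-specific. For the unconditional atomic_set() to be safe, vmapp_count can no longer overlay the v4.0 state, so the companion header change (not shown in this excerpt) presumably hoists the field out of the GICv4.0/GICv4.1 union in struct its_vpe. A hypothetical, trimmed illustration of that layout, not the kernel's definition:

#include <stdatomic.h>

struct vpe_sketch {
    atomic_int vmapp_count;    /* hoisted out: shared by v4.0 and v4.1 */
    union {
        /* GICv4.0-only state */
        struct {
            int vpe_proxy_event;
        };
        /* GICv4.1-only state would sit in a sibling struct */
    };
};

int main(void)
{
    struct vpe_sketch vpe = { .vmapp_count = 0, .vpe_proxy_event = -1 };

    /* Writing vmapp_count no longer clobbers the v4.0 union member. */
    atomic_store(&vpe.vmapp_count, 1);
    return vpe.vpe_proxy_event == -1 ? 0 : 1;
}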