1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kvm_host.h>
4 #include <asm/irq_remapping.h>
9 #include "posted_intr.h"
14 * Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler()
15 * when a WAKEUP_VECTOR interrupt is posted. vCPUs are added to the list when
16 * the vCPU is scheduled out and is blocking (e.g. in HLT) with IRQs enabled.
17 * The vCPU's posted interrupt descriptor is updated at the same time to set its
18 * notification vector to WAKEUP_VECTOR, so that posted interrupts from devices
19 * wake the target vCPUs. vCPUs are removed from the list and the notification
20 * vector is reset when the vCPU is scheduled in.
22 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
24 * Protect the per-CPU list with a per-CPU spinlock to handle task migration.
25 * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the
26 * ->sched_in() path will need to take the vCPU off the list of the _previous_
27 * CPU. IRQs must be disabled when taking this lock, otherwise deadlock will
28 * occur if a wakeup IRQ arrives and attempts to acquire the lock.
30 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
/* Return the posted-interrupt descriptor embedded in this vCPU's VMX struct. */
32 static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
34 return &(to_vmx(vcpu)->pi_desc);
/*
 * Atomically swap the full 64-bit PID.control word from @old to @new.
 * Returns non-zero if the descriptor changed underneath us (callers loop
 * on this function with a fresh snapshot until the cmpxchg succeeds).
 */
37 static int pi_try_set_control(struct pi_desc *pi_desc, u64 old, u64 new)
40 * PID.ON can be set at any time by a different vCPU or by hardware,
41 * e.g. a device. PID.control must be written atomically, and the
42 * update must be retried with a fresh snapshot if an ON change causes
43 * the cmpxchg to fail.
45 if (cmpxchg64(&pi_desc->control, old, new) != old)
/*
 * Update the posted-interrupt descriptor when @vcpu is being loaded on
 * pCPU @cpu: point PI.NDST at the new CPU and clear PI.SN so that device
 * posted interrupts are delivered (notified) to the right physical CPU.
 */
51 void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
53 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
54 struct pi_desc old, new;
58 * To simplify hot-plug and dynamic toggling of APICv, keep PI.NDST and
59 * PI.SN up-to-date even if there is no assigned device or if APICv is
60 * deactivated due to a dynamic inhibit bit, e.g. for Hyper-V's SynIC.
62 if (!enable_apicv || !lapic_in_kernel(vcpu))
65 /* Nothing to do if PI.SN and PI.NDST both have the desired value. */
66 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
70 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
71 * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
72 * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
73 * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up
76 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
81 /* The full case. Set the new destination and clear SN. */
82 dest = cpu_physical_id(cpu);
/* NOTE(review): looks like xAPIC destination format (ID in bits 15:8) — confirm against the x2APIC branch. */
84 dest = (dest << 8) & 0xFF00;
87 old.control = new.control = READ_ONCE(pi_desc->control);
91 } while (pi_try_set_control(pi_desc, old.control, new.control));
96 * Clear SN before reading the bitmap. The VT-d firmware
97 * writes the bitmap and reads SN atomically (5.2.3 in the
98 * spec), so it doesn't really have a memory barrier that
99 * pairs with this, but we cannot do that and we need one.
101 smp_mb__after_atomic();
/* An interrupt may have been posted while SN was set; make it pending. */
103 if (!pi_is_pir_empty(pi_desc))
/*
 * VT-d posted interrupts require an in-kernel irqchip, APICv enabled, at
 * least one assigned device, and hardware interrupt-posting capability.
 */
107 static bool vmx_can_use_vtd_pi(struct kvm *kvm)
109 return irqchip_in_kernel(kvm) && enable_apicv &&
110 kvm_arch_has_assigned_device(kvm) &&
111 irq_remapping_cap(IRQ_POSTING_CAP);
/*
 * Called when @vcpu is scheduled out; suppresses posted-interrupt
 * notifications (SN) while the vCPU is preempted, iff VT-d PI is in use.
 */
114 void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
116 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
118 if (!vmx_can_use_vtd_pi(vcpu->kvm))
121 /* Set SN when the vCPU is preempted */
/*
 * Undo pi_pre_block(): take the vCPU off the per-CPU wakeup list it was
 * queued on and restore the notification vector from WAKEUP_VECTOR back
 * to the normal POSTED_INTR_VECTOR.  Caller must have IRQs disabled.
 */
126 static void __pi_post_block(struct kvm_vcpu *vcpu)
128 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
129 struct pi_desc old, new;
133 * Remove the vCPU from the wakeup list of the _previous_ pCPU, which
134 * will not be the same as the current pCPU if the task was migrated.
136 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
137 list_del(&vcpu->blocked_vcpu_list);
138 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
140 dest = cpu_physical_id(vcpu->cpu);
/* NOTE(review): looks like xAPIC destination format (ID in bits 15:8) — confirm against the x2APIC branch. */
142 dest = (dest << 8) & 0xFF00;
144 WARN(pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR,
145 "Wakeup handler not enabled while the vCPU was blocking");
148 old.control = new.control = READ_ONCE(pi_desc->control);
152 /* set 'NV' to 'notification vector' */
153 new.nv = POSTED_INTR_VECTOR;
154 } while (pi_try_set_control(pi_desc, old.control, new.control));
160 * This routine does the following things for a vCPU that is going
161 * to be blocked if VT-d PI is enabled.
162 * - Store the vCPU on the wakeup list, so when interrupts happen
163 * we can find the right vCPU to wake up.
164 * - Change the Posted-interrupt descriptor as below:
165 * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR
166 * - If 'ON' is set during this process, which means at least one
167 * interrupt is posted for this vCPU, we cannot block it; in
168 * this case, return 1, otherwise, return 0.
171 int pi_pre_block(struct kvm_vcpu *vcpu)
173 struct pi_desc old, new;
174 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
/* Nothing to do if VT-d PI is unavailable or IRQs are blocked in the guest. */
177 if (!vmx_can_use_vtd_pi(vcpu->kvm) ||
178 vmx_interrupt_blocked(vcpu))
/* IRQs must be off while holding the per-CPU wakeup-list lock (see lock comment). */
181 local_irq_save(flags);
/* Remember the pCPU so __pi_post_block() can find the right list later. */
183 vcpu->pre_pcpu = vcpu->cpu;
184 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
185 list_add_tail(&vcpu->blocked_vcpu_list,
186 &per_cpu(blocked_vcpu_on_cpu, vcpu->cpu));
187 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
189 WARN(pi_desc->sn == 1,
190 "Posted Interrupt Suppress Notification set before blocking");
193 old.control = new.control = READ_ONCE(pi_desc->control);
195 /* set 'NV' to 'wakeup vector' */
196 new.nv = POSTED_INTR_WAKEUP_VECTOR;
197 } while (pi_try_set_control(pi_desc, old.control, new.control));
199 /* We should not block the vCPU if an interrupt is posted for it. */
200 if (pi_test_on(pi_desc))
201 __pi_post_block(vcpu);
203 local_irq_restore(flags);
/* pre_pcpu == -1 (reset by __pi_post_block) means "do not block". */
204 return (vcpu->pre_pcpu == -1);
/*
 * Revert the blocking-time PI state when the vCPU unblocks; no-op if the
 * vCPU never completed pi_pre_block() (pre_pcpu already reset to -1).
 */
207 void pi_post_block(struct kvm_vcpu *vcpu)
211 if (vcpu->pre_pcpu == -1)
/* IRQs off: __pi_post_block() takes the per-CPU wakeup-list spinlock. */
214 local_irq_save(flags);
215 __pi_post_block(vcpu);
216 local_irq_restore(flags);
220 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 *
 * Runs in IRQ context on the CPU that received the wakeup notification;
 * scans this CPU's blocked-vCPU list and wakes any vCPU whose descriptor
 * has an outstanding posted interrupt (PI.ON set).
222 void pi_wakeup_handler(void)
224 struct kvm_vcpu *vcpu;
225 int cpu = smp_processor_id();
227 spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
228 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
230 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
232 if (pi_test_on(pi_desc))
235 spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
/* Per-CPU init: set up the blocked-vCPU wakeup list and its spinlock. */
238 void __init pi_init_cpu(int cpu)
240 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
241 spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
/*
 * True if a posted interrupt is pending for @vcpu: either PI.ON is set,
 * or notifications are suppressed (SN) but the PIR bitmap is non-empty.
 */
244 bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
246 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
248 return pi_test_on(pi_desc) ||
249 (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
254 * Bail out of the block loop if the VM has an assigned
255 * device, but the blocking vCPU didn't reconfigure the
256 * PI.NV to the wakeup vector, i.e. the assigned device
257 * came along after the initial check in pi_pre_block().
259 void vmx_pi_start_assignment(struct kvm *kvm)
/* Irrelevant unless hardware can actually post interrupts. */
261 if (!irq_remapping_cap(IRQ_POSTING_CAP))
/* Kick every vCPU out of blocking so it re-evaluates PI state. */
264 kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
268 * pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: the VM whose IRQ routing is consulted
271 * @host_irq: host irq of the interrupt
272 * @guest_irq: gsi of the interrupt
273 * @set: set or unset PI
274 * returns 0 on success, < 0 on failure
276 int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
279 struct kvm_kernel_irq_routing_entry *e;
280 struct kvm_irq_routing_table *irq_rt;
281 struct kvm_lapic_irq irq;
282 struct kvm_vcpu *vcpu;
283 struct vcpu_data vcpu_info;
286 if (!vmx_can_use_vtd_pi(kvm))
/* The routing table is protected by irq_srcu. */
289 idx = srcu_read_lock(&kvm->irq_srcu);
290 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
291 if (guest_irq >= irq_rt->nr_rt_entries ||
292 hlist_empty(&irq_rt->map[guest_irq])) {
293 pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
294 guest_irq, irq_rt->nr_rt_entries);
298 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
/* Only MSI routes can be posted; skip everything else. */
299 if (e->type != KVM_IRQ_ROUTING_MSI)
302 * VT-d PI cannot support posting multicast/broadcast
303 * interrupts to a vCPU, we still use interrupt remapping
304 * for these kind of interrupts.
306 * For lowest-priority interrupts, we only support
307 * those with single CPU as the destination, e.g. user
308 * configures the interrupts via /proc/irq or uses
309 * irqbalance to make the interrupts single-CPU.
311 * We will support full lowest-priority interrupt later.
313 * In addition, we can only inject generic interrupts using
314 * the PI mechanism, refuse to route others through it.
317 kvm_set_msi_irq(kvm, e, &irq);
318 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
319 !kvm_irq_is_postable(&irq)) {
321 * Make sure the IRTE is in remapped mode if
322 * we don't handle it in posted mode.
324 ret = irq_set_vcpu_affinity(host_irq, NULL);
327 "failed to back to remapped mode, irq: %u\n",
/* Physical address of the target vCPU's PI descriptor, for the IRTE. */
335 vcpu_info.pi_desc_addr = __pa(&to_vmx(vcpu)->pi_desc);
336 vcpu_info.vector = irq.vector;
338 trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
339 vcpu_info.vector, vcpu_info.pi_desc_addr, set);
/* set: program posted mode; otherwise fall back to remapped mode. */
342 ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
344 ret = irq_set_vcpu_affinity(host_irq, NULL);
347 printk(KERN_INFO "%s: failed to update PI IRTE\n",
355 srcu_read_unlock(&kvm->irq_srcu, idx);