arch/x86/kvm/vmx/posted_intr.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
#include <asm/cpu.h>

#include "lapic.h"
#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"

/*
 * Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler()
 * when a WAKEUP_VECTOR interrupt is posted.  vCPUs are added to the list when
 * the vCPU is scheduled out and is blocking (e.g. in HLT) with IRQs enabled.
 * The vCPU's posted interrupt descriptor is updated at the same time to set its
 * notification vector to WAKEUP_VECTOR, so that posted interrupts from devices
 * wake the target vCPU.  vCPUs are removed from the list and the notification
 * vector is reset when the vCPU is scheduled in.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
/*
 * Protect the per-CPU list with a per-CPU spinlock to handle task migration.
 * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the
 * ->sched_in() path will need to take the vCPU off the list of the _previous_
 * CPU.  IRQs must be disabled when taking this lock, otherwise deadlock will
 * occur if a wakeup IRQ arrives and attempts to acquire the lock.
 */
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

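/* The posted-interrupt descriptor is embedded in struct vcpu_vmx. */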
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
        return &(to_vmx(vcpu)->pi_desc);
}

static int pi_try_set_control(struct pi_desc *pi_desc, u64 old, u64 new)
{
        /*
         * PID.ON can be set at any time by a different vCPU or by hardware,
         * e.g. a device.  PID.control must be written atomically, and the
         * update must be retried with a fresh snapshot if an ON change causes
         * the cmpxchg to fail.
         */
        if (cmpxchg64(&pi_desc->control, old, new) != old)
                return -EBUSY;

        return 0;
}

void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
        struct pi_desc old, new;
        unsigned int dest;

        /*
         * To simplify hot-plug and dynamic toggling of APICv, keep PI.NDST and
         * PI.SN up-to-date even if there is no assigned device or if APICv is
         * deactivated due to a dynamic inhibit bit, e.g. for Hyper-V's SynIC.
         */
        if (!enable_apicv || !lapic_in_kernel(vcpu))
                return;

        /* Nothing to do if PI.SN and PI.NDST both have the desired value. */
        if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
                return;

        /*
         * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
         * PI.NDST: pi_post_block() is the one expected to change PID.NDST and
         * the wakeup handler expects the vCPU to be on the blocked_vcpu_list
         * that matches PI.NDST.  Otherwise, the vCPU may not be woken up
         * correctly.
         */
        if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
                pi_clear_sn(pi_desc);
                goto after_clear_sn;
        }

        /* The full case.  Set the new destination and clear SN. */
        dest = cpu_physical_id(cpu);
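        /* In xAPIC mode, the destination APIC ID occupies bits 15:8 of PI.NDST. */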
        if (!x2apic_mode)
                dest = (dest << 8) & 0xFF00;

        do {
                old.control = new.control = READ_ONCE(pi_desc->control);

                new.ndst = dest;
                new.sn = 0;
        } while (pi_try_set_control(pi_desc, old.control, new.control));

after_clear_sn:

        /*
         * Clear SN before reading the bitmap.  The VT-d firmware
         * writes the bitmap and reads SN atomically (5.2.3 in the
         * spec), so it doesn't really have a memory barrier that
         * pairs with this, but we cannot do that and we need one.
         */
        smp_mb__after_atomic();

        if (!pi_is_pir_empty(pi_desc))
                pi_set_on(pi_desc);
}

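/*
 * VT-d posted interrupts require an in-kernel irqchip with APICv enabled,
 * at least one assigned device, and IOMMU posted-interrupt support.
 */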
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
        return irqchip_in_kernel(kvm) && enable_apicv &&
                kvm_arch_has_assigned_device(kvm) &&
                irq_remapping_cap(IRQ_POSTING_CAP);
}

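/*
 * When a vCPU is preempted, set Suppress Notification so that devices do not
 * send notification events to a vCPU that is not running; posted interrupts
 * keep accumulating in the PIR and are folded into ON when SN is cleared in
 * vmx_vcpu_pi_load().
 */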
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

        if (!vmx_can_use_vtd_pi(vcpu->kvm))
                return;

        /* Set SN when the vCPU is preempted */
        if (vcpu->preempted)
                pi_set_sn(pi_desc);
}

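/*
 * Undo the blocking setup done by pi_pre_block(): take the vCPU off the
 * wakeup list of the pCPU it blocked on and restore the notification vector
 * to POSTED_INTR_VECTOR, with PI.NDST pointing at the current pCPU.
 */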
static void __pi_post_block(struct kvm_vcpu *vcpu)
{
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
        struct pi_desc old, new;
        unsigned int dest;

        /*
         * Remove the vCPU from the wakeup list of the _previous_ pCPU, which
         * will not be the same as the current pCPU if the task was migrated.
         */
        spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
        list_del(&vcpu->blocked_vcpu_list);
        spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));

        dest = cpu_physical_id(vcpu->cpu);
        if (!x2apic_mode)
                dest = (dest << 8) & 0xFF00;

        WARN(pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR,
             "Wakeup handler not enabled while the vCPU was blocking");

        do {
                old.control = new.control = READ_ONCE(pi_desc->control);

                new.ndst = dest;

                /* set 'NV' to 'notification vector' */
                new.nv = POSTED_INTR_VECTOR;
        } while (pi_try_set_control(pi_desc, old.control, new.control));

        vcpu->pre_pcpu = -1;
}

/*
 * This routine does the following for a vCPU that is about to block, if VT-d
 * PI is enabled:
 * - Store the vCPU on the wakeup list, so that when an interrupt arrives we
 *   can find the right vCPU to wake up.
 * - Change the posted-interrupt descriptor as follows:
 *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
 * - If 'ON' is set during this process, meaning at least one interrupt has
 *   been posted for this vCPU, we cannot block it; in this case, return 1,
 *   otherwise return 0.
 */
int pi_pre_block(struct kvm_vcpu *vcpu)
{
        struct pi_desc old, new;
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
        unsigned long flags;

        if (!vmx_can_use_vtd_pi(vcpu->kvm) ||
            vmx_interrupt_blocked(vcpu))
                return 0;

        local_irq_save(flags);

        vcpu->pre_pcpu = vcpu->cpu;
        spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));
        list_add_tail(&vcpu->blocked_vcpu_list,
                      &per_cpu(blocked_vcpu_on_cpu, vcpu->cpu));
        spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->cpu));

        WARN(pi_desc->sn == 1,
             "Posted Interrupt Suppress Notification set before blocking");

        do {
                old.control = new.control = READ_ONCE(pi_desc->control);

                /* set 'NV' to 'wakeup vector' */
                new.nv = POSTED_INTR_WAKEUP_VECTOR;
        } while (pi_try_set_control(pi_desc, old.control, new.control));

        /* We should not block the vCPU if an interrupt is posted for it.  */
        if (pi_test_on(pi_desc))
                __pi_post_block(vcpu);

        local_irq_restore(flags);
        return (vcpu->pre_pcpu == -1);
}

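/*
 * Undo pi_pre_block() when the vCPU stops blocking.  This is a no-op if the
 * vCPU was never put on a wakeup list (pre_pcpu == -1).
 */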
void pi_post_block(struct kvm_vcpu *vcpu)
{
        unsigned long flags;

        if (vcpu->pre_pcpu == -1)
                return;

        local_irq_save(flags);
        __pi_post_block(vcpu);
        local_irq_restore(flags);
}

/*
 * Handler for POSTED_INTR_WAKEUP_VECTOR: kick every blocked vCPU on this CPU
 * that has a posted interrupt pending.
 */
void pi_wakeup_handler(void)
{
        struct kvm_vcpu *vcpu;
        int cpu = smp_processor_id();

        spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
        list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
                        blocked_vcpu_list) {
                struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

                if (pi_test_on(pi_desc))
                        kvm_vcpu_kick(vcpu);
        }
        spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

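/* Initialize the per-CPU wakeup list and its lock for @cpu. */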
void __init pi_init_cpu(int cpu)
{
        INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
        spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

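/*
 * An interrupt is pending if ON is set, or if notifications are suppressed
 * (SN is set) but the PIR is non-empty.
 */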
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

        return pi_test_on(pi_desc) ||
                (pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}

/*
 * Bail out of the block loop if the VM has an assigned
 * device, but the blocking vCPU didn't reconfigure the
 * PI.NV to the wakeup vector, i.e. the assigned device
 * came along after the initial check in pi_pre_block().
 */
void vmx_pi_start_assignment(struct kvm *kvm)
{
        if (!irq_remapping_cap(IRQ_POSTING_CAP))
                return;

        kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
}

/*
 * pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
                   bool set)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_lapic_irq irq;
        struct kvm_vcpu *vcpu;
        struct vcpu_data vcpu_info;
        int idx, ret = 0;

        if (!vmx_can_use_vtd_pi(kvm))
                return 0;

        idx = srcu_read_lock(&kvm->irq_srcu);
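        /* The IRQ routing table is RCU-protected via kvm->irq_srcu. */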
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        if (guest_irq >= irq_rt->nr_rt_entries ||
            hlist_empty(&irq_rt->map[guest_irq])) {
                pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
                             guest_irq, irq_rt->nr_rt_entries);
                goto out;
        }

        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                if (e->type != KVM_IRQ_ROUTING_MSI)
                        continue;
                /*
                 * VT-d PI cannot support posting multicast/broadcast
                 * interrupts to a vCPU; we still use interrupt remapping
                 * for those kinds of interrupts.
                 *
                 * For lowest-priority interrupts, we only support
                 * those with a single CPU as the destination, e.g. the user
                 * configures the interrupt via /proc/irq or uses
                 * irqbalance to make the interrupt single-CPU.
                 *
                 * We will support full lowest-priority interrupts later.
                 *
                 * In addition, we can only inject generic interrupts using
                 * the PI mechanism; refuse to route others through it.
                 */

                kvm_set_msi_irq(kvm, e, &irq);
                if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
                    !kvm_irq_is_postable(&irq)) {
                        /*
                         * Make sure the IRTE is in remapped mode if
                         * we don't handle it in posted mode.
                         */
                        ret = irq_set_vcpu_affinity(host_irq, NULL);
                        if (ret < 0) {
                                printk(KERN_INFO
                                   "failed to fall back to remapped mode, irq: %u\n",
                                   host_irq);
                                goto out;
                        }

                        continue;
                }

                vcpu_info.pi_desc_addr = __pa(&to_vmx(vcpu)->pi_desc);
                vcpu_info.vector = irq.vector;

                trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
                                vcpu_info.vector, vcpu_info.pi_desc_addr, set);

                if (set)
                        ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
                else
                        ret = irq_set_vcpu_affinity(host_irq, NULL);

                if (ret < 0) {
                        printk(KERN_INFO "%s: failed to update PI IRTE\n",
                                        __func__);
                        goto out;
                }
        }

        ret = 0;
out:
        srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
}