// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
                                 gpa_t addr, unsigned int len)
{
        return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                        unsigned int len, unsigned long val)
{
        /* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
                               unsigned int len, unsigned long val)
{
        /* Ignore */
        return 0;
}

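/*
 * IGROUPR reads: assemble one group bit per interrupt covered by the
 * access into the returned value.
 */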
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
                                   gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->group)
                        value |= BIT(i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

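/*
 * Push the current priority and group of a HW-backed (GICv4.1) SGI down to
 * the host, so its vSGI configuration matches the guest's view.
 */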
static void vgic_update_vsgi(struct vgic_irq *irq)
{
        WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
                           unsigned int len, unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->group = !!(val & BIT(i));
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        vgic_update_vsgi(irq);
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                } else {
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                }

                vgic_put_irq(vcpu->kvm, irq);
        }
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->enabled)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

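/*
 * ISENABLER writes: each set bit in the written value enables the
 * corresponding interrupt. HW-backed SGIs also get their host interrupt
 * enabled, and mapped level interrupts have their line level resampled.
 */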
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        if (!irq->enabled) {
                                struct irq_data *data;

                                irq->enabled = true;
                                data = &irq_to_desc(irq->host_irq)->irq_data;
                                while (irqd_irq_disabled(data))
                                        enable_irq(irq->host_irq);
                        }

                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                        vgic_put_irq(vcpu->kvm, irq);

                        continue;
                } else if (vgic_irq_is_mapped_level(irq)) {
                        bool was_high = irq->line_level;

                        /*
                         * We need to update the state of the interrupt because
                         * the guest might have changed the state of the device
                         * while the interrupt was disabled at the VGIC level.
                         */
                        irq->line_level = vgic_get_phys_line_level(irq);
                        /*
                         * Deactivate the physical interrupt so the GIC will let
                         * us know when it is asserted again.
                         */
                        if (!irq->active && was_high && !irq->line_level)
                                vgic_irq_set_phys_active(irq, false);
                }
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

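/*
 * ICENABLER writes: each set bit in the written value disables the
 * corresponding interrupt; for HW-backed SGIs the host interrupt is
 * disabled as well.
 */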
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
                        disable_irq_nosync(irq->host_irq);

                irq->enabled = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

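/*
 * Pending-state reads: for HW-backed SGIs the pending bit is queried from
 * the host irqchip; for everything else the emulated state is used.
 */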
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                unsigned long flags;
                bool val;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        int err;

                        val = false;
                        err = irq_get_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    &val);
                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
                } else {
                        val = irq_is_pending(irq);
                }

                value |= ((u32)val << i);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = true;
        vgic_irq_set_phys_active(irq, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
        return (vgic_irq_is_sgi(irq->intid) &&
                vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

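/*
 * ISPENDR writes: set the pending state of the interrupts selected by the
 * written value. On GICv2, SGI bits are write-ignored here; HW-backed SGIs
 * are made pending directly in the host irqchip.
 */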
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ISPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        /* HW SGI? Ask the GIC to inject it */
                        int err;
                        err = irq_set_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    true);
                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                        vgic_put_irq(vcpu->kvm, irq);

                        continue;
                }

                if (irq->hw)
                        vgic_hw_irq_spending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                 bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->pending_latch = false;

        /*
         * We don't want the guest to effectively mask the physical
         * interrupt by doing a write to SPENDR followed by a write to
         * CPENDR for HW interrupts, so we clear the active state on
         * the physical side if the virtual interrupt is not active.
         * This may lead to taking an additional interrupt on the
         * host, but that should not be a problem as the worst that
         * can happen is an additional vgic injection.  We also clear
         * the pending state to maintain proper semantics for edge HW
         * interrupts.
         */
        vgic_irq_set_phys_pending(irq, false);
        if (!irq->active)
                vgic_irq_set_phys_active(irq, false);
}

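/*
 * ICPENDR writes: clear the pending state of the interrupts selected by the
 * written value. On GICv2, SGI bits are write-ignored here; HW-backed SGIs
 * have their pending bit cleared in the host irqchip.
 */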
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /* GICD_ICPENDR0 SGI bits are WI */
                if (is_vgic_v2_sgi(vcpu, irq)) {
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                        /* HW SGI? Ask the GIC to clear its pending bit */
                        int err;
                        err = irq_set_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    false);
                        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                        vgic_put_irq(vcpu->kvm, irq);

                        continue;
                }

                if (irq->hw)
                        vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
                else
                        irq->pending_latch = false;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

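/*
 * ISACTIVER/ICACTIVER reads: return one active bit per interrupt covered by
 * the access.
 */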
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
        int i;

        /* Loop over all IRQs affected by this read */
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->active)
                        value |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
{
        if (is_uaccess)
                return;

        irq->active = active;
        vgic_irq_set_phys_active(irq, active);
}

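/*
 * Update the active state of a single interrupt, propagating the change to
 * the physical interrupt for HW-mapped IRQs and recording the source VCPU
 * for GICv2 SGIs that are made active.
 */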
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                    bool active)
{
        unsigned long flags;
        struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
        } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
                /*
                 * GICv4.1 VSGI feature doesn't track an active state,
                 * so let's not kid ourselves, there is nothing we can
                 * do here.
                 */
                irq->active = false;
        } else {
                u32 model = vcpu->kvm->arch.vgic.vgic_model;
                u8 active_source;

                irq->active = active;

                /*
                 * The GICv2 architecture indicates that the source CPUID for
                 * an SGI should be provided during an EOI which implies that
                 * the active state is stored somewhere, but at the same time
                 * this state is not architecturally exposed anywhere and we
                 * have no way of knowing the right source.
                 *
                 * This may lead to a VCPU not being able to receive
                 * additional instances of a particular SGI after migration
                 * for a GICv2 VM on some GIC implementations.  Oh well.
                 */
                active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
                    active && vgic_irq_is_sgi(irq->intid))
                        irq->active_source = active_source;
        }

        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        else
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
            intid > VGIC_NR_PRIVATE_IRQS)
                kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_cactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
        return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;

        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

        mutex_lock(&vcpu->kvm->lock);
        vgic_change_active_prepare(vcpu, intid);

        __vgic_mmio_write_sactive(vcpu, addr, len, val);

        vgic_change_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len,
                                    unsigned long val)
{
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
        return 0;
}

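/*
 * IPRIORITYR reads: one priority byte per interrupt; assemble up to 'len'
 * bytes into the returned value.
 */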
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        u64 val = 0;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
        int i;
        unsigned long flags;

        for (i = 0; i < len; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                if (irq->hw && vgic_irq_is_sgi(irq->intid))
                        vgic_update_vsgi(irq);
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

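/*
 * ICFGR reads: two configuration bits per interrupt; bit 1 of each field is
 * set for edge-triggered interrupts and clear for level-triggered ones.
 */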
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        u32 value = 0;
        int i;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));

                vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                            gpa_t addr, unsigned int len,
                            unsigned long val)
{
        u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
        int i;
        unsigned long flags;

        for (i = 0; i < len * 4; i++) {
                struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
                 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
                 * code relies on PPIs being level triggered, so we also
                 * make them read-only here.
                 */
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (test_bit(i * 2 + 1, &val))
                        irq->config = VGIC_CONFIG_EDGE;
                else
                        irq->config = VGIC_CONFIG_LEVEL;

                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

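/*
 * Collect the line level of up to 32 interrupts starting at 'intid' into a
 * bitmap (one bit per level-triggered interrupt that is currently high),
 * used when saving the VGIC state from userspace.
 */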
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
        int i;
        u64 val = 0;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
                        val |= (1U << i);

                vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
                                    const u64 val)
{
        int i;
        int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
        unsigned long flags;

        for (i = 0; i < 32; i++) {
                struct vgic_irq *irq;
                bool new_level;

                if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
                        continue;

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                /*
                 * Line level is set irrespective of irq type
                 * (level or edge) to avoid dependency that VM should
                 * restore irq config before line level.
                 */
                new_level = !!(val & (1U << i));
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                irq->line_level = new_level;
                if (new_level)
                        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                else
                        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                vgic_put_irq(vcpu->kvm, irq);
        }
}

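/* bsearch() comparator: find the register region containing the key offset. */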
static int match_region(const void *key, const void *elt)
{
        const unsigned int offset = (unsigned long)key;
        const struct vgic_register_region *region = elt;

        if (offset < region->reg_offset)
                return -1;

        if (offset >= region->reg_offset + region->len)
                return 1;

        return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
                      int nr_regions, unsigned int offset)
{
        return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
                       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_vmcr(vcpu, vmcr);
        else
                vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_get_vmcr(vcpu, vmcr);
        else
                vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
        unsigned long data = kvm_mmio_read_buf(val, len);

        switch (len) {
        case 1:
                return data;
        case 2:
                return le16_to_cpu(data);
        case 4:
                return le32_to_cpu(data);
        default:
                return le64_to_cpu(data);
        }
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
                                unsigned long data)
{
        switch (len) {
        case 1:
                break;
        case 2:
                data = cpu_to_le16(data);
                break;
        case 4:
                data = cpu_to_le32(data);
                break;
        default:
                data = cpu_to_le64(data);
        }

        kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
        return container_of(dev, struct vgic_io_device, dev);
}

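/*
 * Check that an access of the given width and alignment is allowed for this
 * register region and, for per-IRQ registers, that it targets an allocated
 * interrupt.
 */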
static bool check_region(const struct kvm *kvm,
                         const struct vgic_register_region *region,
                         gpa_t addr, int len)
{
        int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

        switch (len) {
        case sizeof(u8):
                flags = VGIC_ACCESS_8bit;
                break;
        case sizeof(u32):
                flags = VGIC_ACCESS_32bit;
                break;
        case sizeof(u64):
                flags = VGIC_ACCESS_64bit;
                break;
        default:
                return false;
        }

        if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
                if (!region->bits_per_irq)
                        return true;

                /* Do we access a non-allocated IRQ? */
                return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
        }

        return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
                     gpa_t addr, int len)
{
        const struct vgic_register_region *region;

        region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
                                       addr - iodev->base_addr);
        if (!region || !check_region(vcpu->kvm, region, addr, len))
                return NULL;

        return region;
}

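/*
 * Userspace accesses are always 32 bits wide. Reads of unhandled offsets
 * return zero, and writes to them are silently ignored.
 */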
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                             gpa_t addr, u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region) {
                *val = 0;
                return 0;
        }

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_read)
                *val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
        else
                *val = region->read(r_vcpu, addr, sizeof(u32));

        return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, const u32 *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        struct kvm_vcpu *r_vcpu;

        region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
        if (!region)
                return 0;

        r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
        if (region->uaccess_write)
                return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

        region->write(r_vcpu, addr, sizeof(u32), *val);
        return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
                 bool is_write, int offset, u32 *val)
{
        if (is_write)
                return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
        else
                return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

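/*
 * Guest MMIO dispatchers: look up the register region for the accessed
 * address and invoke its handler. Reads of unhandled regions return zero
 * and writes to them are ignored.
 */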
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                              gpa_t addr, int len, void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = 0;

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region) {
                memset(val, 0, len);
                return 0;
        }

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_DIST:
                data = region->read(vcpu, addr, len);
                break;
        case IODEV_REDIST:
                data = region->read(iodev->redist_vcpu, addr, len);
                break;
        case IODEV_ITS:
                data = region->its_read(vcpu->kvm, iodev->its, addr, len);
                break;
        }

        vgic_data_host_to_mmio_bus(val, len, data);
        return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                               gpa_t addr, int len, const void *val)
{
        struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
        const struct vgic_register_region *region;
        unsigned long data = vgic_data_mmio_bus_to_host(val, len);

        region = vgic_get_mmio_region(vcpu, iodev, addr, len);
        if (!region)
                return 0;

        switch (iodev->iodev_type) {
        case IODEV_CPUIF:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_DIST:
                region->write(vcpu, addr, len, data);
                break;
        case IODEV_REDIST:
                region->write(iodev->redist_vcpu, addr, len, data);
                break;
        case IODEV_ITS:
                region->its_write(vcpu->kvm, iodev->its, addr, len, data);
                break;
        }

        return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
        .read = dispatch_mmio_read,
        .write = dispatch_mmio_write,
};

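/*
 * Register the distributor MMIO frame (GICv2 or GICv3) as an I/O device on
 * the KVM MMIO bus, so guest accesses are routed to the dispatchers above.
 */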
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
{
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
        int ret = 0;
        unsigned int len;

        switch (type) {
        case VGIC_V2:
                len = vgic_v2_init_dist_iodev(io_device);
                break;
        case VGIC_V3:
                len = vgic_v3_init_dist_iodev(io_device);
                break;
        default:
                BUG_ON(1);
        }

        io_device->base_addr = dist_base_address;
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;

        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
                                      len, &io_device->dev);
        mutex_unlock(&kvm->slots_lock);

        return ret;
}