// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */
#define pr_fmt(fmt) "SVM: " fmt
#include <linux/kvm_types.h>
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>

#include "trace.h"
#include "lapic.h"
#include "x86.h"
#include "irq.h"
#include "svm.h"
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
						(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
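/*
 * Worked example (added for illustration, not in the original source):
 * AVIC_GATAG(0x2, 0x5) == (0x2 << 8) | 0x5 == 0x205, and
 * AVIC_GATAG_TO_VMID(0x205) / AVIC_GATAG_TO_VCPUID(0x205) recover 0x2
 * and 0x5.  The tag is handed to the IOMMU along with the posted
 * interrupt data and echoed back in GA log entries, see
 * avic_ga_log_notifier() below.
 */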
/*
 * Note:
 * This hash table is used to map VM_ID to a struct kvm_svm,
 * when handling AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
        struct list_head node;  /* Used by SVM for per-vcpu ir_list */
        void *data;             /* Storing pointer to struct amd_ir_data */
};
/*
 * Note:
 * This function is called from IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
int avic_ga_log_notifier(u32 ga_tag)
{
        unsigned long flags;
        struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
        u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

        pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
        trace_kvm_avic_ga_log(vm_id, vcpu_id);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
                if (kvm_svm->avic_vm_id != vm_id)
                        continue;
                vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
                break;
        }
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        /*
         * Note:
         * At this point, the IOMMU should have already set the pending
         * bit in the vAPIC backing page. So, we just need to schedule
         * in the vcpu.
         */
        if (vcpu)
                kvm_vcpu_wake_up(vcpu);

        return 0;
}
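/*
 * Added note (summarizing AMD IOMMU GA-log behaviour, not from the
 * original source): the IOMMU appends a GA log entry, and thus invokes
 * avic_ga_log_notifier() above, when a device interrupt is posted to a
 * vCPU whose IsRunning bit is clear; the vIRR bit is already set in the
 * backing page, so waking the vCPU is all that is left to do.
 */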
void avic_vm_destroy(struct kvm *kvm)
{
        unsigned long flags;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

        if (!enable_apicv)
                return;

        if (kvm_svm->avic_logical_id_table_page)
                __free_page(kvm_svm->avic_logical_id_table_page);
        if (kvm_svm->avic_physical_id_table_page)
                __free_page(kvm_svm->avic_physical_id_table_page);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_del(&kvm_svm->hnode);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}
int avic_vm_init(struct kvm *kvm)
{
        unsigned long flags;
        int err = -ENOMEM;
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
        struct kvm_svm *k2;
        struct page *p_page;
        struct page *l_page;
        u32 vm_id;

        if (!enable_apicv)
                return 0;

        /* Allocating physical APIC ID table (4KB) */
        p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!p_page)
                goto free_avic;
        kvm_svm->avic_physical_id_table_page = p_page;

        /* Allocating logical APIC ID table (4KB) */
        l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!l_page)
                goto free_avic;
        kvm_svm->avic_logical_id_table_page = l_page;

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
 again:
        vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
        if (vm_id == 0) { /* id is 1-based, zero is not okay */
                next_vm_id_wrapped = 1;
                goto again;
        }
        /* Is it still in use? Only possible if wrapped at least once */
        if (next_vm_id_wrapped) {
                hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
                        if (k2->avic_vm_id == vm_id)
                                goto again;
                }
        }
        kvm_svm->avic_vm_id = vm_id;
        hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        return 0;

free_avic:
        avic_vm_destroy(kvm);
        return err;
}
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
{
        struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
        phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
        phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
        phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
        vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;

        if (kvm_apicv_activated(svm->vcpu.kvm))
                vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
        else
                vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
}
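/*
 * Added note (paraphrasing the APM, not from the original source): the
 * low bits of the avic_physical_id VMCB field hold the highest valid
 * index of the Physical APIC ID table, which is why
 * AVIC_MAX_PHYSICAL_ID_COUNT is OR'ed into it above; the upper bits hold
 * the table's (SME-tagged) physical address.
 */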
static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
                                       unsigned int index)
{
        u64 *avic_physical_id_table;
        struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

        if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
                return NULL;

        avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);

        return &avic_physical_id_table[index];
}
/*
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses address in the AVIC_BACKING_PAGE pointer
 * field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_alloc_access_page(struct kvm *kvm)
{
        void __user *ret;
        int r = 0;

        mutex_lock(&kvm->slots_lock);

        if (kvm->arch.apic_access_memslot_enabled)
                goto out;

        ret = __x86_set_memory_region(kvm,
                                      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                      APIC_DEFAULT_PHYS_BASE,
                                      PAGE_SIZE);
        if (IS_ERR(ret)) {
                r = PTR_ERR(ret);
                goto out;
        }

        kvm->arch.apic_access_memslot_enabled = true;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
        u64 *entry, new_entry;
        int id = vcpu->vcpu_id;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
                return -EINVAL;

        if (!vcpu->arch.apic->regs)
                return -EINVAL;

        if (kvm_apicv_activated(vcpu->kvm)) {
                int ret;

                ret = avic_alloc_access_page(vcpu->kvm);
                if (ret)
                        return ret;
        }

        svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

        /* Setting AVIC backing page address in the phy APIC ID table */
        entry = avic_get_physical_id_entry(vcpu, id);
        if (!entry)
                return -EINVAL;

        new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
                              AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
                              AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
        WRITE_ONCE(*entry, new_entry);

        svm->avic_physical_id_cache = entry;

        return 0;
}
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
{
        /*
         * Note, the vCPU could get migrated to a different pCPU at any point,
         * which could result in signalling the wrong/previous pCPU.  But if
         * that happens the vCPU is guaranteed to do a VMRUN (after being
         * migrated) and thus will process pending interrupts, i.e. a doorbell
         * is not needed (and the spurious one is harmless).
         */
        int cpu = READ_ONCE(vcpu->cpu);

        if (cpu != get_cpu())
                wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
        put_cpu();
}
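/*
 * Added note (general AVIC behaviour, not from the original source):
 * writing the target pCPU's APIC ID to MSR_AMD64_SVM_AVIC_DOORBELL in
 * avic_ring_doorbell() above makes that pCPU re-evaluate the vAPIC
 * backing page of the vCPU it is running, so a pending vIRR bit is
 * delivered without forcing a VM-exit on the target.
 */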
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
                                   u32 icrl, u32 icrh)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        /*
         * Wake any target vCPUs that are blocking, i.e. waiting for a wake
         * event.  There's no need to signal doorbells, as hardware has handled
         * vCPUs that were in guest at the time of the IPI, and vCPUs that have
         * since entered the guest will have processed pending IRQs at VMRUN.
         */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
                                        GET_APIC_DEST_FIELD(icrh),
                                        icrl & APIC_DEST_MASK)) {
                        vcpu->arch.apic->irr_pending = true;
                        svm_complete_interrupt_delivery(vcpu,
                                                        icrl & APIC_MODE_MASK,
                                                        icrl & APIC_INT_LEVELTRIG,
                                                        icrl & APIC_VECTOR_MASK);
                }
        }
}
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
        u32 icrl = svm->vmcb->control.exit_info_1;
        u32 id = svm->vmcb->control.exit_info_2 >> 32;
        u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);

        switch (id) {
        case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
                /*
                 * Emulate IPIs that are not handled by AVIC hardware, which
                 * only virtualizes Fixed, Edge-Triggered INTRs.  The exit is
                 * a trap, e.g. ICR holds the correct value and RIP has been
                 * advanced, KVM is responsible only for emulating the IPI.
                 * Sadly, hardware may sometimes leave the BUSY flag set, in
                 * which case KVM needs to emulate the ICR write as well in
                 * order to clear the BUSY flag.
                 */
                if (icrl & APIC_ICR_BUSY)
                        kvm_apic_write_nodecode(vcpu, APIC_ICR);
                else
                        kvm_apic_send_ipi(apic, icrl, icrh);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
                /*
                 * At this point, we expect that the AVIC HW has already
                 * set the appropriate IRR bits on the valid target
                 * vcpus. So, we just need to kick the appropriate vcpu.
                 */
                avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
                break;
        case AVIC_IPI_FAILURE_INVALID_TARGET:
                break;
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
                break;
        default:
                pr_err("Unknown IPI interception\n");
        }

        return 1;
}
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu)
{
        if (is_guest_mode(vcpu))
                return APICV_INHIBIT_REASON_NESTED;
        return 0;
}
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
        struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        int index;
        u32 *logical_apic_id_table;
        int dlid = GET_APIC_LOGICAL_ID(ldr);

        if (!dlid)
                return NULL;

        if (flat) { /* flat */
                index = ffs(dlid) - 1;
                if (index > 7)
                        return NULL;
        } else { /* cluster */
                int cluster = (dlid & 0xf0) >> 4;
                int apic = ffs(dlid & 0x0f) - 1;

                if ((apic < 0) || (apic > 7) ||
                    (cluster >= 0xf))
                        return NULL;
                index = (cluster << 2) + apic;
        }
        logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);

        return &logical_apic_id_table[index];
}
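/*
 * Added example (derived from the lookup above, for illustration only):
 * in flat mode a logical ID of 0x08 maps to index ffs(0x08) - 1 == 3;
 * in cluster mode an LDR of 0x21 (cluster 2, member bit 0) maps to
 * index (2 << 2) + 0 == 8.
 */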
static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
{
        bool flat;
        u32 *entry, new_entry;

        flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
        entry = avic_get_logical_id_entry(vcpu, ldr, flat);
        if (!entry)
                return -EINVAL;

        new_entry = READ_ONCE(*entry);
        new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
        new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
        new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
        WRITE_ONCE(*entry, new_entry);

        return 0;
}
static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool flat = svm->dfr_reg == APIC_DFR_FLAT;
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

        if (entry)
                clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
}
static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
        int ret = 0;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
        u32 id = kvm_xapic_id(vcpu->arch.apic);

        if (ldr == svm->ldr_reg)
                return 0;

        avic_invalidate_logical_id_entry(vcpu);

        if (ldr)
                ret = avic_ldr_write(vcpu, id, ldr);
        if (!ret)
                svm->ldr_reg = ldr;

        return ret;
}
static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
{
        u64 *old, *new;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 id = kvm_xapic_id(vcpu->arch.apic);

        if (vcpu->vcpu_id == id)
                return 0;

        old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
        new = avic_get_physical_id_entry(vcpu, id);
        if (!new || !old)
                return 1;

        /* We need to move physical_id_entry to new offset */
        *new = *old;
        *old = 0ULL;
        to_svm(vcpu)->avic_physical_id_cache = new;

        /*
         * Also update the guest physical APIC ID in the logical
         * APIC ID table entry if the LDR has already been set up.
         */
        if (svm->ldr_reg)
                avic_handle_ldr_update(vcpu);

        return 0;
}
static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

        if (svm->dfr_reg == dfr)
                return;

        avic_invalidate_logical_id_entry(vcpu);
        svm->dfr_reg = dfr;
}
static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
{
        u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 &
                                AVIC_UNACCEL_ACCESS_OFFSET_MASK;

        switch (offset) {
        case APIC_ID:
                if (avic_handle_apic_id_update(vcpu))
                        return 0;
                break;
        case APIC_LDR:
                if (avic_handle_ldr_update(vcpu))
                        return 0;
                break;
        case APIC_DFR:
                avic_handle_dfr_update(vcpu);
                break;
        default:
                break;
        }

        kvm_apic_write_nodecode(vcpu, offset);
        return 1;
}
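/*
 * Added note (summarizing the handling here, not from the original
 * source): AVIC reports unaccelerated APIC accesses either as traps,
 * where the write has already completed and RIP has advanced so only the
 * side effects need emulating (avic_unaccel_trap_write() above), or as
 * faults, where the access did not complete and the whole instruction
 * must be re-emulated.  is_avic_unaccelerated_access_trap() classifies
 * the register offset accordingly.
 */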
static bool is_avic_unaccelerated_access_trap(u32 offset)
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret = 0;
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
        u32 vector = svm->vmcb->control.exit_info_2 &
                     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
        bool write = (svm->vmcb->control.exit_info_1 >> 32) &
                     AVIC_UNACCEL_ACCESS_WRITE_MASK;
        bool trap = is_avic_unaccelerated_access_trap(offset);

        trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
                                            trap, write, vector);
        if (trap) {
                /* Handling Trap */
                WARN_ONCE(!write, "svm: Handling trap read.\n");
                ret = avic_unaccel_trap_write(vcpu);
        } else {
                /* Handling Fault */
                ret = kvm_emulate_instruction(vcpu, 0);
        }

        return ret;
}
int avic_init_vcpu(struct vcpu_svm *svm)
{
        int ret;
        struct kvm_vcpu *vcpu = &svm->vcpu;

        if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = avic_init_backing_page(vcpu);
        if (ret)
                return ret;

        INIT_LIST_HEAD(&svm->ir_list);
        spin_lock_init(&svm->ir_list_lock);
        svm->dfr_reg = APIC_DFR_FLAT;

        return ret;
}
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
{
        if (avic_handle_apic_id_update(vcpu) != 0)
                return;
        avic_handle_dfr_update(vcpu);
        avic_handle_ldr_update(vcpu);
}
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
        int ret = 0;
        unsigned long flags;
        struct amd_svm_iommu_ir *ir;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!kvm_arch_has_assigned_device(vcpu->kvm))
                return 0;

        /*
         * Here, we go through the per-vcpu ir_list to update all existing
         * interrupt remapping table entries targeting this vcpu.
         */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))
                goto out;

        list_for_each_entry(ir, &svm->ir_list, node) {
                if (activate)
                        ret = amd_iommu_activate_guest_mode(ir->data);
                else
                        ret = amd_iommu_deactivate_guest_mode(ir->data);
                if (ret)
                        break;
        }
out:
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
        return ret;
}
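/*
 * Added note (not from the original source): the
 * amd_iommu_(de)activate_guest_mode() calls above flip the IRTEs on this
 * vCPU's ir_list between guest mode (interrupts posted directly to the
 * vAPIC backing page) and legacy remapped delivery, matching whether
 * APICv/AVIC is currently active for the vCPU.
 */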
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
        unsigned long flags;
        struct amd_svm_iommu_ir *cur;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_for_each_entry(cur, &svm->ir_list, node) {
                if (cur->data != pi->ir_data)
                        continue;
                list_del(&cur->node);
                kfree(cur);
                break;
        }
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
        int ret = 0;
        unsigned long flags;
        struct amd_svm_iommu_ir *ir;

        /*
         * In some cases, the existing irte is updated and re-set,
         * so we need to check here if it's already been added
         * to the ir_list.
         */
        if (pi->ir_data && (pi->prev_ga_tag != 0)) {
                struct kvm *kvm = svm->vcpu.kvm;
                u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
                struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
                struct vcpu_svm *prev_svm;

                if (!prev_vcpu) {
                        ret = -EINVAL;
                        goto out;
                }

                prev_svm = to_svm(prev_vcpu);
                svm_ir_list_del(prev_svm, pi);
        }

        /*
         * Allocating new amd_iommu_pi_data, which will get
         * added to the per-vcpu ir_list.
         */
        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
        if (!ir) {
                ret = -ENOMEM;
                goto out;
        }
        ir->data = pi->ir_data;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_add(&ir->node, &svm->ir_list);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
        return ret;
}
/*
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU. So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
        struct kvm_lapic_irq irq;
        struct kvm_vcpu *vcpu = NULL;

        kvm_set_msi_irq(kvm, e, &irq);

        if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
            !kvm_irq_is_postable(&irq)) {
                pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
                         __func__, irq.vector);
                return -1;
        }

        pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
                 irq.vector);
        *svm = to_svm(vcpu);
        vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
        vcpu_info->vector = irq.vector;

        return 0;
}
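/*
 * Added note (not from the original source): the pi_desc_addr filled in
 * above is the SME-tagged physical address of the target vCPU's vAPIC
 * backing page; the IOMMU uses it as the guest vAPIC pointer in the
 * IRTE so that device interrupts are posted straight into the vCPU's
 * vIRR.
 */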
/*
 * avic_pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                        uint32_t guest_irq, bool set)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
        int idx, ret = 0;

        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))
                return 0;

        pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
                 __func__, host_irq, guest_irq, set);

        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);

        if (guest_irq >= irq_rt->nr_rt_entries ||
            hlist_empty(&irq_rt->map[guest_irq])) {
                pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
                             guest_irq, irq_rt->nr_rt_entries);
                goto out;
        }
        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                struct vcpu_data vcpu_info;
                struct vcpu_svm *svm = NULL;

                if (e->type != KVM_IRQ_ROUTING_MSI)
                        continue;

                /*
                 * Here, we setup with legacy mode in the following cases:
                 * 1. When the interrupt cannot be targeted to a specific vcpu.
                 * 2. Unsetting posted interrupt.
                 * 3. APIC virtualization is disabled for the vcpu.
                 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
                 */
                if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
                    kvm_vcpu_apicv_active(&svm->vcpu)) {
                        struct amd_iommu_pi_data pi;

                        /* Try to enable guest_mode in IRTE */
                        pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                                            AVIC_HPA_MASK);
                        pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
                                               svm->vcpu.vcpu_id);
                        pi.is_guest_mode = true;
                        pi.vcpu_data = &vcpu_info;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /*
                         * Here, we successfully set up vcpu affinity in
                         * IOMMU guest mode. Now, we need to store the posted
                         * interrupt information in a per-vcpu ir_list so that
                         * we can reference them directly when we update vcpu
                         * scheduling information in IOMMU irte.
                         */
                        if (!ret && pi.is_guest_mode)
                                svm_ir_list_add(svm, &pi);
                } else {
                        /* Use legacy mode in IRTE */
                        struct amd_iommu_pi_data pi;

                        /*
                         * Here, pi is used to:
                         * - Tell IOMMU to use legacy mode for this interrupt.
                         * - Retrieve ga_tag of prior interrupt remapping data.
                         */
                        pi.prev_ga_tag = 0;
                        pi.is_guest_mode = false;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /*
                         * Check if the posted interrupt was previously
                         * set up with the guest_mode by checking if the ga_tag
                         * was cached. If so, we need to clean up the per-vcpu
                         * ir_list.
                         */
                        if (!ret && pi.prev_ga_tag) {
                                int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
                                struct kvm_vcpu *vcpu;

                                vcpu = kvm_get_vcpu_by_id(kvm, id);
                                if (vcpu)
                                        svm_ir_list_del(to_svm(vcpu), &pi);
                        }
                }

                if (!ret && svm) {
                        trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
                                                 e->gsi, vcpu_info.vector,
                                                 vcpu_info.pi_desc_addr, set);
                }

                if (ret < 0) {
                        pr_err("%s: failed to update PI IRTE\n", __func__);
                        goto out;
                }
        }

        ret = 0;
out:
        srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
}
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
{
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_NESTED) |
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
                          BIT(APICV_INHIBIT_REASON_SEV);

        return supported & BIT(reason);
}
static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
        int ret = 0;
        unsigned long flags;
        struct amd_svm_iommu_ir *ir;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!kvm_arch_has_assigned_device(vcpu->kvm))
                return 0;

        /*
         * Here, we go through the per-vcpu ir_list to update all existing
         * interrupt remapping table entries targeting this vcpu.
         */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))
                goto out;

        list_for_each_entry(ir, &svm->ir_list, node) {
                ret = amd_iommu_update_ga(cpu, r, ir->data);
                if (ret)
                        break;
        }
out:
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
        return ret;
}
void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        u64 entry;
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);

        lockdep_assert_preemption_disabled();

        if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
                return;

        /*
         * No need to update anything if the vCPU is blocking, i.e. if the vCPU
         * is being scheduled in after being preempted.  The CPU entries in the
         * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
         * If the vCPU was migrated, its new CPU value will be stuffed when the
         * vCPU unblocks.
         */
        if (kvm_vcpu_is_blocking(vcpu))
                return;

        entry = READ_ONCE(*(svm->avic_physical_id_cache));
        WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
        entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
}
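/*
 * Added note (not from the original source): amd_iommu_update_ga(), via
 * avic_update_iommu_vcpu_affinity() above, rewrites each guest-mode IRTE
 * on this vCPU's ir_list with the new destination CPU and IsRun state,
 * so posted device interrupts keep following the vCPU as it migrates
 * between physical CPUs.
 */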
void __avic_vcpu_put(struct kvm_vcpu *vcpu)
{
        u64 entry;
        struct vcpu_svm *svm = to_svm(vcpu);

        lockdep_assert_preemption_disabled();

        entry = READ_ONCE(*(svm->avic_physical_id_cache));

        /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
        if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
                return;

        avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
static void avic_vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu = get_cpu();

        WARN_ON(cpu != vcpu->cpu);

        __avic_vcpu_load(vcpu, cpu);

        put_cpu();
}

static void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();

        __avic_vcpu_put(vcpu);

        preempt_enable();
}
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb01.ptr;
        bool activated = kvm_vcpu_apicv_active(vcpu);

        if (!enable_apicv)
                return;

        if (activated) {
                /*
                 * During AVIC temporary deactivation, guest could update
                 * APIC ID, DFR and LDR registers, which would not be trapped
                 * by avic_unaccelerated_access_interception(). In this case,
                 * we need to check and update the AVIC logical APIC ID table
                 * accordingly before re-activating.
                 */
                avic_apicv_post_state_restore(vcpu);
                vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
        } else {
                vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
        }
        vmcb_mark_dirty(vmcb, VMCB_AVIC);

        if (activated)
                avic_vcpu_load(vcpu);
        else
                avic_vcpu_put(vcpu);

        avic_set_pi_irte_mode(vcpu, activated);
}
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_apicv_active(vcpu))
                return;

        /*
         * Unload the AVIC when the vCPU is about to block, _before_
         * the vCPU actually blocks.
         *
         * Any IRQs that arrive before IsRunning=0 will not cause an
         * incomplete IPI vmexit on the source, therefore vIRR will also
         * be checked by kvm_vcpu_check_block() before blocking.  The
         * memory barrier implicit in set_current_state orders writing
         * IsRunning=0 before reading the vIRR.  The processor needs a
         * matching memory barrier on interrupt delivery between writing
         * IRR and reading IsRunning; the lack of this barrier might be
         * the cause of errata #1235.
         */
        avic_vcpu_put(vcpu);
}
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_apicv_active(vcpu))
                return;

        avic_vcpu_load(vcpu);
}