// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
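/*
 * GA tag layout (32 bits): bits [31:8] hold the AVIC VM ID and bits [7:0]
 * hold the vCPU ID.  For example, AVIC_GATAG(0x12, 0x3) yields 0x1203, and
 * AVIC_GATAG_TO_VMID()/AVIC_GATAG_TO_VCPUID() recover 0x12 and 0x3 from it.
 */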
/*
 * This hash table is used to map a VM ID to a struct kvm_svm when handling
 * an AMD IOMMU GALOG notification, in order to schedule in a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS		8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = false;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
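/* next_vm_id and next_vm_id_wrapped are only updated under svm_vm_data_hash_lock. */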
/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
        struct list_head node;  /* Used by SVM for per-vcpu ir_list */
        void *data;             /* Storing pointer to struct amd_ir_data */
/*
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
int avic_ga_log_notifier(u32 ga_tag)
        struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
        u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

        pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
        trace_kvm_avic_ga_log(vm_id, vcpu_id);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
                if (kvm_svm->avic_vm_id != vm_id)
                vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        /*
         * At this point, the IOMMU should have already set the pending
         * bit in the vAPIC backing page. So, we just need to schedule
         * in the vCPU.
         */
        kvm_vcpu_wake_up(vcpu);
void avic_vm_destroy(struct kvm *kvm)
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

        if (kvm_svm->avic_logical_id_table_page)
                __free_page(kvm_svm->avic_logical_id_table_page);
        if (kvm_svm->avic_physical_id_table_page)
                __free_page(kvm_svm->avic_physical_id_table_page);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_del(&kvm_svm->hnode);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
int avic_vm_init(struct kvm *kvm)
        struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

        /* Allocating physical APIC ID table (4KB) */
        p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        kvm_svm->avic_physical_id_table_page = p_page;

        /* Allocating logical APIC ID table (4KB) */
        l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        kvm_svm->avic_logical_id_table_page = l_page;

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
        if (vm_id == 0) { /* id is 1-based, zero is not okay */
                next_vm_id_wrapped = true;
        /* Is it still in use? Only possible if wrapped at least once */
        if (next_vm_id_wrapped) {
                hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
                        if (k2->avic_vm_id == vm_id)
        kvm_svm->avic_vm_id = vm_id;
        hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        avic_vm_destroy(kvm);
void avic_init_vmcb(struct vcpu_svm *svm)
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
        phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
        phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
        phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
        vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE & VMCB_AVIC_APIC_BAR_MASK;

        if (kvm_apicv_activated(svm->vcpu.kvm))
                vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
                vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
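/*
 * Return a pointer into the per-VM physical APIC ID table; @index is the
 * guest's xAPIC ID, and each 64-bit entry holds the vAPIC backing-page
 * address, the host CPU's APIC ID and the Valid/IsRunning control bits
 * written by the callers below.
 */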
static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
        u64 *avic_physical_id_table;
        struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

        if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)

        avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);

        return &avic_physical_id_table[index];
/*
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
 * field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_alloc_access_page(struct kvm *kvm)
        mutex_lock(&kvm->slots_lock);

        if (kvm->arch.apic_access_memslot_enabled)

        ret = __x86_set_memory_region(kvm,
                                      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
                                      APIC_DEFAULT_PHYS_BASE,

        kvm->arch.apic_access_memslot_enabled = true;

        mutex_unlock(&kvm->slots_lock);
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
        u64 *entry, new_entry;
        int id = vcpu->vcpu_id;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)

        if (!vcpu->arch.apic->regs)

        if (kvm_apicv_activated(vcpu->kvm)) {
                ret = avic_alloc_access_page(vcpu->kvm);

        svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

        /* Set the AVIC backing page address in the physical APIC ID table */
        entry = avic_get_physical_id_entry(vcpu, id);

        new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
                               AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
                              AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
        WRITE_ONCE(*entry, new_entry);

        svm->avic_physical_id_cache = entry;
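        /*
         * The cached entry pointer is what avic_vcpu_load() and avic_vcpu_put()
         * use to publish or clear the host CPU and the IsRunning flag without
         * re-walking the physical APIC ID table.
         */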
void avic_ring_doorbell(struct kvm_vcpu *vcpu)
        /*
         * Note, the vCPU could get migrated to a different pCPU at any point,
         * which could result in signalling the wrong/previous pCPU. But if
         * that happens the vCPU is guaranteed to do a VMRUN (after being
         * migrated) and thus will process pending interrupts, i.e. a doorbell
         * is not needed (and the spurious one is harmless).
         */
        int cpu = READ_ONCE(vcpu->cpu);

        if (cpu != get_cpu())
                wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
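/*
 * Called from avic_incomplete_ipi_interception() when hardware could not
 * deliver an IPI because one or more destination vCPUs were not running.
 */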
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
        struct kvm_vcpu *vcpu;

        /*
         * Wake any target vCPUs that are blocking, i.e. waiting for a wake
         * event. There's no need to signal doorbells, as hardware has handled
         * vCPUs that were in guest at the time of the IPI, and vCPUs that have
         * since entered the guest will have processed pending IRQs at VMRUN.
         */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
                                        GET_APIC_DEST_FIELD(icrh),
                                        icrl & APIC_DEST_MASK)) {
                        vcpu->arch.apic->irr_pending = true;
                        svm_complete_interrupt_delivery(vcpu,
                                                        icrl & APIC_MODE_MASK,
                                                        icrl & APIC_INT_LEVELTRIG,
                                                        icrl & APIC_VECTOR_MASK);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
        u32 icrl = svm->vmcb->control.exit_info_1;
        u32 id = svm->vmcb->control.exit_info_2 >> 32;
        u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);

        case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
                /*
                 * Emulate IPIs that are not handled by AVIC hardware, which
                 * only virtualizes Fixed, Edge-Triggered INTRs. The exit is
                 * a trap, e.g. ICR holds the correct value and RIP has been
                 * advanced, so KVM is responsible only for emulating the IPI.
                 * Sadly, hardware may sometimes leave the BUSY flag set, in
                 * which case KVM needs to emulate the ICR write as well in
                 * order to clear the BUSY flag.
                 */
                if (icrl & APIC_ICR_BUSY)
                        kvm_apic_write_nodecode(vcpu, APIC_ICR);
                kvm_apic_send_ipi(apic, icrl, icrh);
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
                /*
                 * At this point, we expect that the AVIC HW has already
                 * set the appropriate IRR bits on the valid target
                 * vCPUs. So, we just need to kick the appropriate vCPU.
                 */
                avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
        case AVIC_IPI_FAILURE_INVALID_TARGET:
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
                pr_err("Unknown IPI interception\n");
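/*
 * Look up the logical APIC ID table entry for @ldr. In flat mode the 8-bit
 * logical ID is a bitmask and the entry index is the bit position; in cluster
 * mode the high nibble selects the cluster and the low nibble selects one of
 * four APICs within it, i.e. index = (cluster << 2) + apic.
 */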
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
        struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
        u32 *logical_apic_id_table;
        int dlid = GET_APIC_LOGICAL_ID(ldr);

        if (flat) { /* flat */
                index = ffs(dlid) - 1;
        } else { /* cluster */
                int cluster = (dlid & 0xf0) >> 4;
                int apic = ffs(dlid & 0x0f) - 1;

                if ((apic < 0) || (apic > 7) ||
                index = (cluster << 2) + apic;

        logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);

        return &logical_apic_id_table[index];
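/*
 * Point the logical APIC ID table entry selected by @ldr at guest physical
 * APIC ID @g_physical_id and mark the entry valid.
 */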
static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
        u32 *entry, new_entry;

        flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
        entry = avic_get_logical_id_entry(vcpu, ldr, flat);

        new_entry = READ_ONCE(*entry);
        new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
        new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
        new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
        WRITE_ONCE(*entry, new_entry);
static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        bool flat = svm->dfr_reg == APIC_DFR_FLAT;
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

        clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
        u32 id = kvm_xapic_id(vcpu->arch.apic);

        if (ldr == svm->ldr_reg)

        avic_invalidate_logical_id_entry(vcpu);

        ret = avic_ldr_write(vcpu, id, ldr);
static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 id = kvm_xapic_id(vcpu->arch.apic);

        if (vcpu->vcpu_id == id)

        old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
        new = avic_get_physical_id_entry(vcpu, id);

        /* We need to move the physical_id_entry to the new offset */

        to_svm(vcpu)->avic_physical_id_cache = new;

        /*
         * Also update the guest physical APIC ID in the logical
         * APIC ID table entry, if the LDR has already been set up.
         */
        avic_handle_ldr_update(vcpu);
static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

        if (svm->dfr_reg == dfr)

        avic_invalidate_logical_id_entry(vcpu);
static int avic_unaccel_trap_write(struct kvm_vcpu *vcpu)
        u32 offset = to_svm(vcpu)->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

        if (avic_handle_apic_id_update(vcpu))

        if (avic_handle_ldr_update(vcpu))

        avic_handle_dfr_update(vcpu);

        kvm_apic_write_nodecode(vcpu, offset);
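/*
 * Trap-style unaccelerated APIC accesses arrive after the guest's write has
 * already been committed to the vAPIC backing page (and RIP advanced), so KVM
 * only needs to react to the new value; fault-style accesses are reported
 * before completion and require instruction emulation instead.
 */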
static bool is_avic_unaccelerated_access_trap(u32 offset)
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
        u32 vector = svm->vmcb->control.exit_info_2 &
                     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
        bool write = (svm->vmcb->control.exit_info_1 >> 32) &
                     AVIC_UNACCEL_ACCESS_WRITE_MASK;
        bool trap = is_avic_unaccelerated_access_trap(offset);

        trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
                                            trap, write, vector);

        WARN_ONCE(!write, "svm: Handling trap read.\n");
        ret = avic_unaccel_trap_write(vcpu);

        ret = kvm_emulate_instruction(vcpu, 0);
int avic_init_vcpu(struct vcpu_svm *svm)
        struct kvm_vcpu *vcpu = &svm->vcpu;

        if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))

        ret = avic_init_backing_page(vcpu);

        INIT_LIST_HEAD(&svm->ir_list);
        spin_lock_init(&svm->ir_list_lock);
        svm->dfr_reg = APIC_DFR_FLAT;
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu)
        if (avic_handle_apic_id_update(vcpu) != 0)
        avic_handle_dfr_update(vcpu);
        avic_handle_ldr_update(vcpu);
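/*
 * Switch every IRTE on this vCPU's ir_list between IOMMU guest (posted) mode
 * and legacy remapped mode when APIC virtualization is toggled for the vCPU.
 */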
static int avic_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
        struct amd_svm_iommu_ir *ir;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!kvm_arch_has_assigned_device(vcpu->kvm))

        /*
         * Here, we go through the per-vcpu ir_list to update all existing
         * interrupt remapping table entries targeting this vcpu.
         */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))

        list_for_each_entry(ir, &svm->ir_list, node) {
                ret = amd_iommu_activate_guest_mode(ir->data);
                ret = amd_iommu_deactivate_guest_mode(ir->data);

        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb01.ptr;
        bool activated = kvm_vcpu_apicv_active(vcpu);

        /*
         * During temporary AVIC deactivation, the guest could update the
         * APIC ID, DFR and LDR registers, which would not be trapped by
         * avic_unaccelerated_access_interception(). In this case,
         * we need to check and update the AVIC logical APIC ID table
         * accordingly before re-activating.
         */
        avic_apicv_post_state_restore(vcpu);
        vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
        vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;

        vmcb_mark_dirty(vmcb, VMCB_AVIC);

        avic_vcpu_load(vcpu, vcpu->cpu);

        avic_set_pi_irte_mode(vcpu, activated);
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
        struct amd_svm_iommu_ir *cur;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_for_each_entry(cur, &svm->ir_list, node) {
                if (cur->data != pi->ir_data)
                list_del(&cur->node);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
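/*
 * Track the IRTE on the target vCPU's ir_list so that avic_vcpu_load() and
 * avic_vcpu_put() can propagate CPU and IsRunning changes to the IOMMU via
 * avic_update_iommu_vcpu_affinity().
 */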
static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
        struct amd_svm_iommu_ir *ir;

        /*
         * In some cases, the existing irte is updated and re-set,
         * so we need to check here if it's already been added
         * to the per-vcpu ir_list.
         */
        if (pi->ir_data && (pi->prev_ga_tag != 0)) {
                struct kvm *kvm = svm->vcpu.kvm;
                u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
                struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
                struct vcpu_svm *prev_svm;

                prev_svm = to_svm(prev_vcpu);
                svm_ir_list_del(prev_svm, pi);

        /*
         * Allocate a new amd_iommu_pi_data, which will get
         * added to the per-vcpu ir_list.
         */
        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);

        ir->data = pi->ir_data;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_add(&ir->node, &svm->ir_list);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
/*
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU. So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. the user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
        struct kvm_lapic_irq irq;
        struct kvm_vcpu *vcpu = NULL;

        kvm_set_msi_irq(kvm, e, &irq);

        if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
            !kvm_irq_is_postable(&irq)) {
                pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
                         __func__, irq.vector);

        pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,

        vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
        vcpu_info->vector = irq.vector;
/*
 * avic_pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
                        uint32_t guest_irq, bool set)
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
        int idx, ret = -EINVAL;

        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))

        pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
                 __func__, host_irq, guest_irq, set);

        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        WARN_ON(guest_irq >= irq_rt->nr_rt_entries);

        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                struct vcpu_data vcpu_info;
                struct vcpu_svm *svm = NULL;

                if (e->type != KVM_IRQ_ROUTING_MSI)
                /*
                 * Here, we fall back to legacy (remapped) mode in the
                 * following cases:
                 * 1. When the interrupt cannot be targeted to a specific vCPU.
                 * 2. Unsetting the posted interrupt.
                 * 3. APIC virtualization is disabled for the vCPU.
                 * 4. IRQ has an incompatible delivery mode (SMI, INIT, etc).
                 */
                if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
                    kvm_vcpu_apicv_active(&svm->vcpu)) {
                        struct amd_iommu_pi_data pi;

                        /* Try to enable guest_mode in IRTE */
                        pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                        pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
                        pi.is_guest_mode = true;
                        pi.vcpu_data = &vcpu_info;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /*
                         * Here, we have successfully set up vCPU affinity in
                         * IOMMU guest mode. Now, we need to store the posted
                         * interrupt information in the per-vcpu ir_list so
                         * that we can reference it directly when we update
                         * the vCPU scheduling information in the IOMMU IRTE.
                         */
                        if (!ret && pi.is_guest_mode)
                                svm_ir_list_add(svm, &pi);
                        /* Use legacy mode in IRTE */
                        struct amd_iommu_pi_data pi;

                        /*
                         * Here, pi is used to:
                         * - Tell the IOMMU to use legacy mode for this interrupt.
                         * - Retrieve the ga_tag of the prior interrupt remapping data.
                         */
                        pi.is_guest_mode = false;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /*
                         * Check if the posted interrupt was previously
                         * set up in guest mode by checking if the ga_tag
                         * was cached. If so, we need to clean up the per-vcpu
                         * ir_list.
                         */
                        if (!ret && pi.prev_ga_tag) {
                                int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
                                struct kvm_vcpu *vcpu;

                                vcpu = kvm_get_vcpu_by_id(kvm, id);
                                svm_ir_list_del(to_svm(vcpu), &pi);

                trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
                                         e->gsi, vcpu_info.vector,
                                         vcpu_info.pi_desc_addr, set);

                pr_err("%s: failed to update PI IRTE\n", __func__);

        srcu_read_unlock(&kvm->irq_srcu, idx);
bool avic_check_apicv_inhibit_reasons(ulong bit)
        ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
                          BIT(APICV_INHIBIT_REASON_ABSENT) |
                          BIT(APICV_INHIBIT_REASON_HYPERV) |
                          BIT(APICV_INHIBIT_REASON_NESTED) |
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
                          BIT(APICV_INHIBIT_REASON_BLOCKIRQ);

        return supported & BIT(bit);
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
        struct amd_svm_iommu_ir *ir;
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!kvm_arch_has_assigned_device(vcpu->kvm))

        /*
         * Here, we go through the per-vcpu ir_list to update all existing
         * interrupt remapping table entries targeting this vcpu.
         */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))

        list_for_each_entry(ir, &svm->ir_list, node) {
                ret = amd_iommu_update_ga(cpu, r, ir->data);

        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
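/*
 * avic_vcpu_load() publishes the new host CPU's APIC ID and sets IsRunning in
 * this vCPU's physical APIC ID table entry, and mirrors the change into any
 * IOMMU IRTEs on the ir_list; avic_vcpu_put() clears IsRunning again.
 */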
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);

        lockdep_assert_preemption_disabled();

        /*
         * Since the host physical APIC ID is 8 bits,
         * we can support host APIC IDs up to 255.
         */
        if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))

        /*
         * No need to update anything if the vCPU is blocking, i.e. if the vCPU
         * is being scheduled in after being preempted. The CPU entries in the
         * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
         * If the vCPU was migrated, its new CPU value will be stuffed when the
         * vCPU unblocks and avic_vcpu_load() is called again.
         */
        if (kvm_vcpu_is_blocking(vcpu))

        entry = READ_ONCE(*(svm->avic_physical_id_cache));
        WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
        entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
void avic_vcpu_put(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);

        lockdep_assert_preemption_disabled();

        entry = READ_ONCE(*(svm->avic_physical_id_cache));

        /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
        if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))

        avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
        if (!kvm_vcpu_apicv_active(vcpu))

        /*
         * Unload the AVIC when the vCPU is about to block, _before_
         * the vCPU actually blocks.
         *
         * Any IRQs that arrive before IsRunning=0 will not cause an
         * incomplete IPI vmexit on the source, therefore vIRR will also
         * be checked by kvm_vcpu_check_block() before blocking. The
         * memory barrier implicit in set_current_state orders writing
         * IsRunning=0 before reading the vIRR. The processor needs a
         * matching memory barrier on interrupt delivery between writing
         * IRR and reading IsRunning; the lack of this barrier might be
         * the cause of erratum #1235.
         */
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
        if (!kvm_vcpu_apicv_active(vcpu))

        WARN_ON(cpu != vcpu->cpu);

        avic_vcpu_load(vcpu, cpu);