// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/hashtable.h>
#include <linux/amd-iommu.h>
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>

#include "trace.h"
#include "lapic.h"
#include "x86.h"
#include "irq.h"
#include "svm.h"

/* enable / disable AVIC */
bool avic;
module_param(avic, bool, S_IRUGO);

#define SVM_AVIC_DOORBELL	0xc001011b

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
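
/*
 * AVIC_HPA_MASK keeps bits 51:12 of a host physical address: the low 12
 * bits (page offset) and bits 63:52 are cleared, leaving a 4KB-aligned
 * address. For example, 0x123456789ABC & AVIC_HPA_MASK == 0x123456789000.
 */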

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
					 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
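
/*
 * Worked example: AVIC_GATAG(0x123456, 0x78) == 0x12345678; decoding it,
 * AVIC_GATAG_TO_VMID(0x12345678) == 0x123456 and
 * AVIC_GATAG_TO_VCPUID(0x12345678) == 0x78.
 */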

/*
 * This hash table is used to map VM_ID to a struct kvm_svm,
 * when handling an AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = false;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

/*
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
	trace_kvm_avic_ga_log(vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
		if (kvm_svm->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/*
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vCPU.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

	if (!avic)
		return;

	if (kvm_svm->avic_logical_id_table_page)
		__free_page(kvm_svm->avic_logical_id_table_page);
	if (kvm_svm->avic_physical_id_table_page)
		__free_page(kvm_svm->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&kvm_svm->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

int avic_vm_init(struct kvm *kvm)
{
	unsigned long flags;
	int err = -ENOMEM;
	struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
	struct kvm_svm *k2;
	struct page *p_page;
	struct page *l_page;
	u32 vm_id;

	if (!avic)
		return 0;

	/* Allocating physical APIC ID table (4KB) */
	p_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!p_page)
		goto free_avic;

	kvm_svm->avic_physical_id_table_page = p_page;

	/* Allocating logical APIC ID table (4KB) */
	l_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!l_page)
		goto free_avic;

	kvm_svm->avic_logical_id_table_page = l_page;

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
 again:
	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
	if (vm_id == 0) { /* id is 1-based, zero is not okay */
		next_vm_id_wrapped = 1;
		goto again;
	}
	/* Is it still in use? Only possible if wrapped at least once */
	if (next_vm_id_wrapped) {
		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
			if (k2->avic_vm_id == vm_id)
				goto again;
		}
	}
	kvm_svm->avic_vm_id = vm_id;
	hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	return 0;

free_avic:
	avic_vm_destroy(kvm);
	return err;
}

void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	if (kvm_apicv_activated(svm->vcpu.kvm))
		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	else
		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
}
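
/*
 * Layout note: the avic_physical_id VMCB field carries the 4KB-aligned
 * table address in bits 51:12 (hence the AVIC_HPA_MASK applied above)
 * and the highest valid table index in its low byte, which is why
 * AVIC_MAX_PHYSICAL_ID_COUNT can simply be OR-ed in.
 */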

static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
				       unsigned int index)
{
	u64 *avic_physical_id_table;
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return NULL;

	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}

/*
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses the address in the AVIC_BACKING_PAGE
 * pointer field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_update_access_page(struct kvm *kvm, bool activate)
{
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	/*
	 * During kvm_destroy_vm(), kvm_pit_set_reinject() could trigger
	 * an APICv mode change, which updates the
	 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT memory region. So, we need to
	 * ensure that kvm->mm == current->mm.
	 */
	if ((kvm->arch.apic_access_page_done == activate) ||
	    (kvm->mm != current->mm))
		goto out;

	ret = __x86_set_memory_region(kvm,
				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE,
				      activate ? PAGE_SIZE : 0);
	if (ret)
		goto out;

	kvm->arch.apic_access_page_done = activate;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}
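
/*
 * Note that passing a size of 0 to __x86_set_memory_region() above
 * deletes the memslot, so on deactivation guest APIC accesses fall back
 * to the regular MMIO emulation path instead of the AVIC-backed page.
 */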

static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
	u64 *entry, new_entry;
	int id = vcpu->vcpu_id;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return -EINVAL;

	if (!vcpu->arch.apic->regs)
		return -EINVAL;

	if (kvm_apicv_activated(vcpu->kvm)) {
		int ret;

		ret = avic_update_access_page(vcpu->kvm, true);
		if (ret)
			return ret;
	}

	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

	/* Setting AVIC backing page address in the physical APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;

	return 0;
}
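
/*
 * Each 64-bit physical APIC ID table entry packs the 4KB-aligned backing
 * page address, the host physical APIC ID of the CPU the vCPU runs on,
 * plus is-running and valid bits (see the AVIC_PHYSICAL_ID_ENTRY_* masks).
 * avic_vcpu_load()/avic_vcpu_put() below update the host ID and
 * is-running fields on every vCPU switch.
 */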

static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
				   u32 icrl, u32 icrh)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		bool m = kvm_apic_match_dest(vcpu, source,
					     icrl & APIC_SHORT_MASK,
					     GET_APIC_DEST_FIELD(icrh),
					     icrl & APIC_DEST_MASK);

		if (m && !avic_vcpu_is_running(vcpu))
			kvm_vcpu_wake_up(vcpu);
	}
}

int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
	u32 icrl = svm->vmcb->control.exit_info_1;
	u32 id = svm->vmcb->control.exit_info_2 >> 32;
	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
		/*
		 * AVIC hardware handles the generation of
		 * IPIs when the specified Message Type is Fixed
		 * (also known as fixed delivery mode) and
		 * the Trigger Mode is edge-triggered. The hardware
		 * also supports self and broadcast delivery modes
		 * specified via the Destination Shorthand (DSH)
		 * field of the ICRL. Logical and physical APIC ID
		 * formats are supported. All other IPI types cause
		 * a #VMEXIT, which needs to be emulated.
		 */
		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
		break;
	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING:
		/*
		 * At this point, we expect that the AVIC HW has already
		 * set the appropriate IRR bits on the valid target
		 * vcpus. So, we just need to kick the appropriate vcpu.
		 */
		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
		break;
	case AVIC_IPI_FAILURE_INVALID_TARGET:
		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
			  index, vcpu->vcpu_id, icrh, icrl);
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
		break;
	default:
		pr_err("Unknown IPI interception\n");
	}

	return 1;
}

static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
	int index;
	u32 *logical_apic_id_table;
	int dlid = GET_APIC_LOGICAL_ID(ldr);

	if (!dlid)
		return NULL;

	if (flat) { /* flat */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else { /* cluster */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if ((apic < 0) || (apic > 7) ||
		    (cluster >= 0xf))
			return NULL;
		index = (cluster << 2) + apic;
	}

	logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);

	return &logical_apic_id_table[index];
}
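
/*
 * Example: in flat mode a logical ID of 0x04 (bit 2 set) maps to index 2;
 * in cluster mode a logical ID of 0x21 (cluster 2, member bit 0) maps to
 * index (2 << 2) + 0 == 8.
 */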

static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
{
	bool flat;
	u32 *entry, new_entry;

	flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
	entry = avic_get_logical_id_entry(vcpu, ldr, flat);
	if (!entry)
		return -EINVAL;

	new_entry = READ_ONCE(*entry);
	new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
	new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
	new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(*entry, new_entry);

	return 0;
}

static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	bool flat = svm->dfr_reg == APIC_DFR_FLAT;
	u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

	if (entry)
		clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
}

static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
	int ret = 0;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
	u32 id = kvm_xapic_id(vcpu->arch.apic);

	if (ldr == svm->ldr_reg)
		return 0;

	avic_invalidate_logical_id_entry(vcpu);

	if (ldr)
		ret = avic_ldr_write(vcpu, id, ldr);
	if (!ret)
		svm->ldr_reg = ldr;

	return ret;
}

static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
{
	u64 *old, *new;
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 id = kvm_xapic_id(vcpu->arch.apic);

	if (vcpu->vcpu_id == id)
		return 0;

	old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
	new = avic_get_physical_id_entry(vcpu, id);
	if (!new || !old)
		return 1;

	/* We need to move physical_id_entry to new offset */
	*new = *old;
	*old = 0ULL;
	to_svm(vcpu)->avic_physical_id_cache = new;

	/*
	 * Also update the guest physical APIC ID in the logical
	 * APIC ID table entry if the LDR has already been set up.
	 */
	if (svm->ldr_reg)
		avic_handle_ldr_update(vcpu);

	return 0;
}

static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

	if (svm->dfr_reg == dfr)
		return;

	avic_invalidate_logical_id_entry(vcpu);
	svm->dfr_reg = dfr;
}

static int avic_unaccel_trap_write(struct vcpu_svm *svm)
{
	struct kvm_lapic *apic = svm->vcpu.arch.apic;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

	switch (offset) {
	case APIC_ID:
		if (avic_handle_apic_id_update(&svm->vcpu))
			return 0;
		break;
	case APIC_LDR:
		if (avic_handle_ldr_update(&svm->vcpu))
			return 0;
		break;
	case APIC_DFR:
		avic_handle_dfr_update(&svm->vcpu);
		break;
	default:
		break;
	}

	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));

	return 1;
}

static bool is_avic_unaccelerated_access_trap(u32 offset)
{
	bool ret = false;

	switch (offset) {
	case APIC_ID:
	case APIC_EOI:
	case APIC_RRR:
	case APIC_LDR:
	case APIC_DFR:
	case APIC_SPIV:
	case APIC_ESR:
	case APIC_ICR:
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT0:
	case APIC_LVT1:
	case APIC_LVTERR:
	case APIC_TMICT:
	case APIC_TDCR:
		ret = true;
		break;
	default:
		break;
	}
	return ret;
}
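
/*
 * Unaccelerated accesses to the registers above are reported as traps:
 * the write has already been committed to the vAPIC page when the
 * #VMEXIT is taken, so only the side effects need to be emulated.
 * Accesses to other registers are reported as faults, and the guest
 * instruction must be emulated in full.
 */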

int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
	u32 vector = svm->vmcb->control.exit_info_2 &
		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* Handling Trap */
		WARN_ONCE(!write, "svm: Handling trap read.\n");
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* Handling Fault */
		ret = kvm_emulate_instruction(vcpu, 0);
	}

	return ret;
}

int avic_init_vcpu(struct vcpu_svm *svm)
{
	int ret;
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (!avic || !irqchip_in_kernel(vcpu->kvm))
		return 0;

	ret = avic_init_backing_page(vcpu);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&svm->ir_list);
	spin_lock_init(&svm->ir_list_lock);
	svm->dfr_reg = APIC_DFR_FLAT;

	return ret;
}

void avic_post_state_restore(struct kvm_vcpu *vcpu)
{
	if (avic_handle_apic_id_update(vcpu) != 0)
		return;
	avic_handle_dfr_update(vcpu);
	avic_handle_ldr_update(vcpu);
}

void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
{
	if (!avic || !lapic_in_kernel(vcpu))
		return;

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_request_apicv_update(vcpu->kvm, activate,
				 APICV_INHIBIT_REASON_IRQWIN);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
}

void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
}

void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
}

static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entries targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		if (activate)
			ret = amd_iommu_activate_guest_mode(ir->data);
		else
			ret = amd_iommu_deactivate_guest_mode(ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}

void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb *vmcb = svm->vmcb;
	bool activated = kvm_vcpu_apicv_active(vcpu);

	if (!avic)
		return;

	if (activated) {
		/*
		 * During AVIC temporary deactivation, the guest could update
		 * the APIC ID, DFR and LDR registers, which would not be
		 * trapped by avic_unaccelerated_access_interception(). In
		 * this case, we need to check and update the AVIC logical
		 * APIC ID table accordingly before re-activating.
		 */
		avic_post_state_restore(vcpu);
		vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
	} else {
		vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
	}
	vmcb_mark_dirty(vmcb, VMCB_AVIC);

	svm_set_pi_irte_mode(vcpu, activated);
}

void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
}

int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
	if (!vcpu->arch.apicv_active)
		return -1;

	kvm_lapic_set_irr(vec, vcpu->arch.apic);
	smp_mb__after_atomic();
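
	/*
	 * The barrier above orders the vIRR update before the
	 * avic_vcpu_is_running() check below: either this CPU observes the
	 * target as running and rings the doorbell, or the wakeup path
	 * picks up the already-pending vIRR bit when the vCPU is scheduled
	 * back in.
	 */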
	if (avic_vcpu_is_running(vcpu)) {
		int cpuid = vcpu->cpu;

		if (cpuid != get_cpu())
			wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
		put_cpu();
	} else
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	return false;
}

static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	unsigned long flags;
	struct amd_svm_iommu_ir *cur;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_for_each_entry(cur, &svm->ir_list, node) {
		if (cur->data != pi->ir_data)
			continue;
		list_del(&cur->node);
		kfree(cur);
		break;
	}
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;

	/*
	 * In some cases, the existing irte is updated and re-set,
	 * so we need to check here if it's already been added
	 * to the ir_list.
	 */
	if (pi->ir_data && (pi->prev_ga_tag != 0)) {
		struct kvm *kvm = svm->vcpu.kvm;
		u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
		struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		struct vcpu_svm *prev_svm;

		if (!prev_vcpu) {
			ret = -EINVAL;
			goto out;
		}

		prev_svm = to_svm(prev_vcpu);
		svm_ir_list_del(prev_svm, pi);
	}

	/*
	 * Allocating a new amd_svm_iommu_ir, which will be added
	 * to the per-vcpu ir_list.
	 */
	ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
	if (!ir) {
		ret = -ENOMEM;
		goto out;
	}
	ir->data = pi->ir_data;

	spin_lock_irqsave(&svm->ir_list_lock, flags);
	list_add(&ir->node, &svm->ir_list);
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
	return ret;
}

/*
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU. So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
		 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu = NULL;

	kvm_set_msi_irq(kvm, e, &irq);

	if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
	    !kvm_irq_is_postable(&irq)) {
		pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
			 __func__, irq.vector);
		return -1;
	}

	pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
		 irq.vector);
	*svm = to_svm(vcpu);
	vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
	vcpu_info->vector = irq.vector;

	return 0;
}

/*
 * svm_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	int idx, ret = -EINVAL;

	if (!kvm_arch_has_assigned_device(kvm) ||
	    !irq_remapping_cap(IRQ_POSTING_CAP))
		return 0;

	pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
		 __func__, host_irq, guest_irq, set);

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	WARN_ON(guest_irq >= irq_rt->nr_rt_entries);

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		struct vcpu_data vcpu_info;
		struct vcpu_svm *svm = NULL;

		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;

		/*
		 * Here, we set up the IRTE with legacy mode in the
		 * following cases:
		 * 1. When the interrupt cannot be targeted to a specific vcpu.
		 * 2. Unsetting posted interrupt.
		 * 3. APIC virtualization is disabled for the vcpu.
		 * 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
		 */
		if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
		    kvm_vcpu_apicv_active(&svm->vcpu)) {
			struct amd_iommu_pi_data pi;

			/* Try to enable guest_mode in IRTE */
			pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
					    AVIC_HPA_MASK);
			pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
					       svm->vcpu.vcpu_id);
			pi.is_guest_mode = true;
			pi.vcpu_data = &vcpu_info;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/*
			 * Here, we have successfully set up vcpu affinity in
			 * IOMMU guest mode. Now, we need to store the posted
			 * interrupt information in a per-vcpu ir_list so that
			 * we can reference it directly when we update the vcpu
			 * scheduling information in the IOMMU irte.
			 */
			if (!ret && pi.is_guest_mode)
				svm_ir_list_add(svm, &pi);
		} else {
			/* Use legacy mode in IRTE */
			struct amd_iommu_pi_data pi;

			/*
			 * Here, pi is used to:
			 * - Tell the IOMMU to use legacy mode for this interrupt.
			 * - Retrieve the ga_tag of the prior interrupt remapping data.
			 */
			pi.prev_ga_tag = 0;
			pi.is_guest_mode = false;
			ret = irq_set_vcpu_affinity(host_irq, &pi);

			/*
			 * Check if the posted interrupt was previously
			 * set up with guest_mode by checking if the ga_tag
			 * was cached. If so, we need to clean up the per-vcpu
			 * ir_list.
			 */
			if (!ret && pi.prev_ga_tag) {
				int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
				struct kvm_vcpu *vcpu;

				vcpu = kvm_get_vcpu_by_id(kvm, id);
				if (vcpu)
					svm_ir_list_del(to_svm(vcpu), &pi);
			}
		}

		if (!ret && svm) {
			trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
						 e->gsi, vcpu_info.vector,
						 vcpu_info.pi_desc_addr, set);
		}

		if (ret < 0) {
			pr_err("%s: failed to update PI IRTE\n", __func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}

bool svm_check_apicv_inhibit_reasons(ulong bit)
{
	ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
			  BIT(APICV_INHIBIT_REASON_HYPERV) |
			  BIT(APICV_INHIBIT_REASON_NESTED) |
			  BIT(APICV_INHIBIT_REASON_IRQWIN) |
			  BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
			  BIT(APICV_INHIBIT_REASON_X2APIC);

	return supported & BIT(bit);
}

void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate)
{
	avic_update_access_page(kvm, activate);
}

static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
	int ret = 0;
	unsigned long flags;
	struct amd_svm_iommu_ir *ir;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_arch_has_assigned_device(vcpu->kvm))
		return 0;

	/*
	 * Here, we go through the per-vcpu ir_list to update all existing
	 * interrupt remapping table entries targeting this vcpu.
	 */
	spin_lock_irqsave(&svm->ir_list_lock, flags);

	if (list_empty(&svm->ir_list))
		goto out;

	list_for_each_entry(ir, &svm->ir_list, node) {
		ret = amd_iommu_update_ga(cpu, r, ir->data);
		if (ret)
			break;
	}
out:
	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
	return ret;
}

void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	u64 entry;
	/* ID = 0xff (broadcast), ID > 0xff (reserved) */
	int h_physical_id = kvm_cpu_get_apicid(cpu);
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	/*
	 * Since the host physical APIC id is 8 bits,
	 * we can support host APIC IDs up to 255.
	 */
	if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	if (svm->avic_is_running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
					svm->avic_is_running);
}

void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
	u64 entry;
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	entry = READ_ONCE(*(svm->avic_physical_id_cache));
	if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
		avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}

/*
 * This function is called during VCPU halt/unhalt.
 */
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->avic_is_running = is_run;
	if (is_run)
		avic_vcpu_load(vcpu, vcpu->cpu);
	else
		avic_vcpu_put(vcpu);
}

void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
{
	avic_set_running(vcpu, false);
}

void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
	if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
		kvm_vcpu_update_apicv(vcpu);
	avic_set_running(vcpu, true);
}