// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>

#include <asm/svm.h>
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
};
struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;
	u32 host_intercept_exceptions;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;
};
struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;
	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;
	unsigned int3_injected;
	unsigned long int3_rip;
	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;
	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;
	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
};
struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};
DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);
static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}
static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
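
/*
 * Usage sketch, not part of the original header: after software writes
 * a VMCB field that hardware may have cached, the matching clean bit
 * must be cleared so that the next VMRUN reloads the new value. The
 * helper below is hypothetical and only illustrates the pattern used
 * throughout svm.c.
 */
static inline void example_update_cr2(struct vcpu_svm *svm, unsigned long cr2)
{
	svm->vmcb->save.cr2 = cr2;		/* update the cached field */
	mark_dirty(svm->vmcb, VMCB_CR2);	/* make hardware re-read CR2 */
}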
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}
static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}
static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}
static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}
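
/*
 * Usage sketch, not part of the original header: the CR-intercept
 * helpers take bit numbers such as INTERCEPT_CR0_READ and
 * INTERCEPT_CR0_WRITE from <asm/svm.h>. A hypothetical helper that
 * traps CR0 accesses only while KVM needs to shadow the register:
 */
static inline void example_update_cr0_intercept(struct vcpu_svm *svm,
						bool guest_owns_cr0)
{
	if (guest_owns_cr0) {
		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	} else {
		set_cr_intercept(svm, INTERCEPT_CR0_READ);
		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	}
}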
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}
static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}
static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}
static inline bool is_intercept(struct vcpu_svm *svm, int bit)
{
	return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
}
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
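
/*
 * Usage sketch, not part of the original header: GIF gates all
 * interrupt sources on SVM, so blocking checks start from gif_set().
 * A simplified, hypothetical variant of such a predicate (the real
 * svm_nmi_blocked() in svm.c checks more state):
 */
static inline bool example_nmi_blocked(struct vcpu_svm *svm)
{
	/* With GIF clear, every interrupt source, including NMI, is held off. */
	return !gif_set(svm) ||
	       (svm->vcpu.arch.hflags & HF_NMI_MASK);
}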
/* svm.c */

#define MSR_INVALID			0xffffffffU
u32 svm_msrpm_offset(u32 msr);
void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_SMI));
}
static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INTR));
}
static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
}
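
/*
 * Usage sketch, not part of the original header: before injecting an
 * event into L2, KVM consults these predicates to decide whether the
 * event must instead cause a nested #VMEXIT to L1. A simplified,
 * hypothetical check for NMI:
 */
static inline bool example_nmi_causes_nested_vmexit(struct vcpu_svm *svm)
{
	return is_guest_mode(&svm->vcpu) && nested_exit_on_nmi(svm);
}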
void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
			  struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct vcpu_svm *svm);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void sync_nested_vmcb_control(struct vcpu_svm *svm);
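
/*
 * Usage sketch for the NESTED_EXIT_* codes defined above, not part of
 * the original header: nested_svm_exit_handled() classifies an exit,
 * and NESTED_EXIT_DONE means it must be reflected to L1 as a
 * synthesized #VMEXIT. The dispatcher below is hypothetical.
 */
static inline bool example_reflect_exit_to_l1(struct vcpu_svm *svm)
{
	if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
		nested_svm_vmexit(svm);	/* reflect the exit to L1 */
		return true;
	}

	return false;	/* NESTED_EXIT_HOST: L0 handles the exit itself */
}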
extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL
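
/*
 * Usage sketch, not part of the original header: a physical APIC ID
 * table entry packs the backing-page address together with the
 * valid/is-running flags. A hypothetical constructor built from the
 * masks above:
 */
static inline u64 example_avic_physid_entry(u64 backing_page_pa, bool running)
{
	u64 entry = (backing_page_pa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;

	if (running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	return entry;
}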
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}
static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
/* sev.c */

extern unsigned int max_sev_asid;
static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
static inline bool svm_sev_enabled(void)
{
	return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}
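
/*
 * Usage sketch, not part of the original header: SEV-only paths are
 * gated on these predicates so that non-SEV hosts and guests pay
 * nothing. A hypothetical helper returning the guest's SEV ASID, or 0
 * when SEV is unused:
 */
static inline unsigned int example_sev_asid(struct kvm *kvm)
{
	if (!svm_sev_enabled() || !sev_guest(kvm))
		return 0;

	return to_kvm_svm(kvm)->sev_info.asid;
}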
void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
int __init sev_hardware_setup(void);
void sev_hardware_teardown(void);

#endif /* __SVM_SVM_H */