/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define IOPM_SIZE	(PAGE_SIZE * 3)
#define MSRPM_SIZE	(PAGE_SIZE * 2)

#define MAX_DIRECT_ACCESS_MSRS	20
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet. */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

	bool initialized;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity.  This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	/* SEV-ES support */
	struct vmcb_save_area *vmsa;
	struct ghcb *ghcb;
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u64 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_guest(kvm) && sev->es_active;
#else
	return false;
#endif
}
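
/*
 * Example (illustrative only, not part of KVM): most SEV-ES special-casing
 * in this file is gated exactly like this hypothetical helper.  Note that
 * sev_es_guest() implies sev_guest(), so a single check suffices:
 */
static inline bool example_guest_state_encrypted(struct kvm *kvm)
{
	/* With SEV-ES, register state in the VMSA is encrypted as well. */
	return sev_es_guest(kvm);
}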

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
{
	return (vmcb->control.clean & (1 << bit));
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
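
/*
 * Example (illustrative only, not part of KVM): every software write to a
 * VMCB field must clear the corresponding clean bit, or the CPU may keep
 * using a stale cached copy on the next VMRUN.  A minimal sketch, assuming
 * a valid vmcb; the helper name is hypothetical:
 */
static inline void example_set_gpat(struct vmcb *vmcb, u64 gpat)
{
	vmcb->save.g_pat = gpat;		/* update the field... */
	vmcb_mark_dirty(vmcb, VMCB_NPT);	/* ...then invalidate its clean bit */
}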

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
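
/*
 * Example (illustrative only): intercepts are addressed with a single flat
 * bit number; the helpers above pick the right intercepts[] word
 * internally.  A hypothetical sketch using the INTERCEPT_CPUID index from
 * <asm/svm.h>:
 */
static inline void example_intercept_cpuid(struct vmcb_control_area *control)
{
	vmcb_set_intercept(control, INTERCEPT_CPUID);
	WARN_ON_ONCE(!vmcb_is_intercept(control, INTERCEPT_CPUID));
}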

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
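
/*
 * Example (illustrative only): callers never need to know whether GIF is
 * virtualized (vGIF, stored in int_ctl) or tracked in hflags; gif_set()
 * hides that distinction.  A hypothetical injection gate:
 */
static inline bool example_can_take_event(struct vcpu_svm *svm)
{
	/* No interrupt, NMI or SMI can be taken while GIF is clear. */
	return gif_set(svm);
}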

/* svm.c */

#define MSR_INVALID	0xffffffffU

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void svm_flush_tlb(struct kvm_vcpu *vcpu);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
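
/*
 * Example (illustrative only): for set_msr_interception(), read/write == 1
 * means "pass through" (no intercept) and 0 means "intercept".  A sketch
 * with a hypothetical helper, assuming msrpm came from
 * svm_vcpu_alloc_msrpm():
 */
static inline void example_passthrough_msr(struct kvm_vcpu *vcpu, u32 *msrpm)
{
	/* Let the guest read and write MSR_STAR directly. */
	set_msr_interception(vcpu, msrpm, MSR_STAR, 1, 1);
}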

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
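
/*
 * Example (illustrative only): reflecting an event that L1 asked to
 * intercept as a nested exit with no extra exit information.  Hypothetical
 * helper; SVM_EXIT_SMI comes from <asm/svm.h>:
 */
static inline int example_reflect_smi_to_l1(struct vcpu_svm *svm)
{
	if (nested_exit_on_smi(svm))
		return nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
	return 0;	/* L1 does not intercept SMI; handle it in L0 */
}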

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1U << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL
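
/*
 * Example (illustrative only): how the mask layout composes a physical ID
 * table entry; bits 51:12 hold the backing-page address and bits 7:0 the
 * host APIC ID.  Hypothetical helper; real setup also manages the
 * is-running bit under the proper locking:
 */
static inline u64 example_avic_physical_id_entry(u64 backing_page_pa,
						 u32 host_apic_id)
{
	return (backing_page_pa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
	       (host_apic_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK) |
	       AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
}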

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_post_state_restore(struct kvm_vcpu *vcpu);
void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_pre_update_apicv_exec_ctrl(struct kvm *kvm, bool activate);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
		       uint32_t guest_irq, bool set);
void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range);
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif /* __SVM_SVM_H */